Commit 2e88d948 authored by Sandeep's avatar Sandeep Committed by Robert Kimball

hybrid at core (#1821)

* skeleton backend

* Code owner from if conditioning

* add simple placement for interpreter and register pass in hybrid

* placement policy applied

* clone the function if needed

* split the function

* Compile subfunctions in corresponding backends

* hybrid backend works as-is for abc test

* cleanup

* add placement policy for CPU

* cleanup a little

* add simple op cost method to backend

* enable CPU pass via flag

* address clang-format PR issue

* resolve build

* clean-up

* update manifest

* disable HYBRID as default build

* style

* addressing offline discussion

* more offline discussion
parent cf15ef32
...@@ -92,6 +92,7 @@ option(NGRAPH_CPU_ENABLE "Control the building of the CPU backend" TRUE) ...@@ -92,6 +92,7 @@ option(NGRAPH_CPU_ENABLE "Control the building of the CPU backend" TRUE)
option(NGRAPH_INTELGPU_ENABLE "Control the building of the Intel GPU backend with clDNN" FALSE) option(NGRAPH_INTELGPU_ENABLE "Control the building of the Intel GPU backend with clDNN" FALSE)
option(NGRAPH_GPU_ENABLE "Control the building of the GPU backend" FALSE) option(NGRAPH_GPU_ENABLE "Control the building of the GPU backend" FALSE)
option(NGRAPH_INTERPRETER_ENABLE "Control the building of the INTERPRETER backend" TRUE) option(NGRAPH_INTERPRETER_ENABLE "Control the building of the INTERPRETER backend" TRUE)
option(NGRAPH_HYBRID_ENABLE "Control the building of the HYBRID backend" FALSE)
option(NGRAPH_DISTRIBUTED_ENABLE "Add distributed mode to the CPU backend" FALSE) option(NGRAPH_DISTRIBUTED_ENABLE "Add distributed mode to the CPU backend" FALSE)
option(NGRAPH_DEBUG_ENABLE "Enable output for NGRAPH_DEBUG statements" FALSE) option(NGRAPH_DEBUG_ENABLE "Enable output for NGRAPH_DEBUG statements" FALSE)
option(NGRAPH_ONNX_IMPORT_ENABLE "Enable ONNX importer" FALSE) option(NGRAPH_ONNX_IMPORT_ENABLE "Enable ONNX importer" FALSE)
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
/src/ngraph/runtime/ @rkimballn1 @Krovatkin /src/ngraph/runtime/ @rkimballn1 @Krovatkin
/src/ngraph/runtime/cpu/ @jbobba /src/ngraph/runtime/cpu/ @jbobba
/src/ngraph/runtime/gpu/ @rkimballn1 /src/ngraph/runtime/gpu/ @rkimballn1
/src/ngraph/runtime/hybrid/ @sasadep
/src/ngraph/runtime/intelgpu/ @shssf /src/ngraph/runtime/intelgpu/ @shssf
/src/ngraph/runtime/interpreter/ @rkimballn1 /src/ngraph/runtime/interpreter/ @rkimballn1
/src/ngraph/runtime/reference/ @aprocter /src/ngraph/runtime/reference/ @aprocter
......
...@@ -16,6 +16,11 @@ ...@@ -16,6 +16,11 @@
add_subdirectory(interpreter) add_subdirectory(interpreter)
# HYBRID is an experimental backend and is disabled by default
# (see the NGRAPH_HYBRID_ENABLE option, default FALSE).
if (NGRAPH_HYBRID_ENABLE)
add_subdirectory(hybrid)
endif()
if (NGRAPH_CPU_ENABLE) if (NGRAPH_CPU_ENABLE)
add_subdirectory(cpu) add_subdirectory(cpu)
endif() endif()
......
...@@ -107,3 +107,10 @@ void runtime::Backend::validate_call(shared_ptr<const Function> function, ...@@ -107,3 +107,10 @@ void runtime::Backend::validate_call(shared_ptr<const Function> function,
} }
} }
} }
bool runtime::Backend::is_supported(const Node& node) const
{
    // Default: report every op as unsupported; a backend that participates in
    // hybrid placement must override this and return true for the ops it can
    // execute.
    // NOTE(review): the original comment claimed backends "fully support all
    // ops" by default, which contradicts `return false` — confirm which
    // default is intended before callers rely on it.
    return false;
}
...@@ -118,6 +118,11 @@ public: ...@@ -118,6 +118,11 @@ public:
virtual std::vector<PerformanceCounter> virtual std::vector<PerformanceCounter>
get_performance_data(std::shared_ptr<Function> func) const; get_performance_data(std::shared_ptr<Function> func) const;
/// \brief Test if a backend is capable of supporting an op
/// \param node is the op to test.
/// \returns true if the op is supported, false otherwise.
virtual bool is_supported(const Node& node) const;
protected: protected:
void validate_call(std::shared_ptr<const Function> func, void validate_call(std::shared_ptr<const Function> func,
const std::vector<std::shared_ptr<runtime::Tensor>>& outputs, const std::vector<std::shared_ptr<runtime::Tensor>>& outputs,
......
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# Build the hybrid backend shared library. The guard mirrors the parent
# directory's NGRAPH_HYBRID_ENABLE check so this file is safe to include
# directly as well.
if (NGRAPH_HYBRID_ENABLE)
    add_library(hybrid_backend SHARED hybrid_backend.cpp)

    # Version and output directory in a single properties call.
    set_target_properties(hybrid_backend PROPERTIES
        VERSION ${NGRAPH_VERSION}
        LIBRARY_OUTPUT_DIRECTORY ${NGRAPH_BUILD_DIR})

    target_link_libraries(hybrid_backend PUBLIC ngraph)

    install(TARGETS hybrid_backend
            LIBRARY DESTINATION "${NGRAPH_INSTALL_LIB}"
            ARCHIVE DESTINATION "${NGRAPH_INSTALL_LIB}")
endif()
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <memory>
#include <sstream>
#include <string>
#include <typeindex>
#include <typeinfo>
#include <vector>
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/except.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/pass/assign_layout.hpp"
#include "ngraph/pass/assign_placement.hpp"
#include "ngraph/pass/like_replacement.hpp"
#include "ngraph/pass/liveness.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/hybrid/hybrid_backend.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
using descriptor::layout::DenseTensorLayout;
// Exported C entry point: reports the nGraph version this shared library was
// built against, used by the backend loader for compatibility checks.
extern "C" const char* get_ngraph_version_string()
{
return NGRAPH_VERSION;
}
// Exported C factory: constructs the hybrid backend when this library is
// loaded dynamically. Ownership passes to the caller, which must release the
// object via delete_backend().
// NOTE(review): configuration_string is currently ignored — confirm whether
// configuration options should be parsed here.
extern "C" runtime::Backend* new_backend(const char* configuration_string)
{
return new runtime::hybrid::HYBRIDBackend();
}
// Exported C destructor paired with new_backend(): frees a backend instance
// allocated by this library, keeping allocation and deallocation in the same
// module.
extern "C" void delete_backend(runtime::Backend* backend)
{
delete backend;
}
// Copy the full contents of `data` into tensor `tv`, starting at byte
// offset 0. No size/type validation is performed here.
template <typename T>
void copy_data(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<T>& data)
{
    const size_t byte_count = sizeof(T) * data.size();
    tv->write(data.data(), 0, byte_count);
}
// Read the entire contents of tensor `tv` into a std::vector<T>.
// Throws std::invalid_argument if T does not match the tensor's element type.
template <typename T>
std::vector<T> read_vector(std::shared_ptr<ngraph::runtime::Tensor> tv)
{
    if (ngraph::element::from<T>() != tv->get_tensor_layout()->get_element_type())
    {
        throw std::invalid_argument("read_vector type must match Tensor type");
    }
    const size_t count = ngraph::shape_size(tv->get_shape());
    std::vector<T> result(count);
    tv->read(result.data(), 0, count * sizeof(T));
    return result;
}
// Return the backend responsible for `placement`, creating and caching it on
// first use. Previously this performed three map lookups (find, operator[],
// at); a single find + emplace suffices.
shared_ptr<runtime::Backend> runtime::hybrid::HYBRIDBackend::get_cached_backend(Placement placement)
{
    auto it = m_cached_backends.find(placement);
    if (it == m_cached_backends.end())
    {
        // Lazily construct the backend. placement_to_string presumably yields
        // the registered backend name ("CPU", "INTERPRETER", ...) — confirm.
        it = m_cached_backends
                 .emplace(placement, runtime::Backend::create(placement_to_string(placement)))
                 .first;
    }
    return it->second;
}
// Create a host tensor of the given element type and shape; the tensor owns
// its own storage. NOTE(review): "external" appears to be a name/tag passed
// to HostTensor — confirm its meaning against the HostTensor constructor.
shared_ptr<runtime::Tensor> runtime::hybrid::HYBRIDBackend::create_tensor(const element::Type& type,
const Shape& shape)
{
return make_shared<runtime::HostTensor>(type, shape, "external");
}
// Create a host tensor that wraps caller-provided memory instead of
// allocating its own. NOTE(review): presumably the caller retains ownership
// of memory_pointer and must keep it alive for the tensor's lifetime —
// confirm against HostTensor semantics.
shared_ptr<runtime::Tensor> runtime::hybrid::HYBRIDBackend::create_tensor(const element::Type& type,
const Shape& shape,
void* memory_pointer)
{
return make_shared<runtime::HostTensor>(type, shape, memory_pointer, "external");
}
// Prepare `function` for execution: clone it (so passes never mutate the
// caller's graph), run the pass manager over the clone, and cache the result
// keyed by the original function pointer.
//
// BUG FIX: the original built `instance` and ran passes on it but never
// inserted it into m_function_map — the cache was never populated, the find()
// on every call always missed, and all compilation work was discarded.
bool runtime::hybrid::HYBRIDBackend::compile(shared_ptr<Function> function)
{
    if (m_function_map.find(function) == m_function_map.end())
    {
        // Clone function so passes run on a private copy.
        FunctionInstance instance;
        instance.m_function = clone_function(*function);

        pass::Manager pass_manager;
        pass_manager.run_passes(instance.m_function);

        // Store the compiled instance so subsequent compile/call hits the cache.
        m_function_map.emplace(function, std::move(instance));
    }
    return true;
}
// Validate the tensor arguments against the function signature, then ensure
// the function is compiled, and report success.
// NOTE(review): nothing is actually executed here — the compiled function is
// never run against `inputs`/`outputs`. This looks like scaffolding for the
// in-progress hybrid backend; confirm before relying on output tensors.
bool runtime::hybrid::HYBRIDBackend::call(shared_ptr<Function> function,
const vector<shared_ptr<runtime::Tensor>>& outputs,
const vector<shared_ptr<runtime::Tensor>>& inputs)
{
validate_call(function, outputs, inputs);
compile(function);
return true;
}
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once

// FIX: <map> and <unordered_map> were missing even though std::map and
// std::unordered_map are used below — the header compiled only via
// transitive includes. Also fixed the "intputs" typo in the call()
// declaration (the definition already names it "inputs"; declaration
// parameter names do not affect callers).
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>

#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/tensor.hpp"

namespace ngraph
{
    namespace runtime
    {
        namespace hybrid
        {
            // Backend intended to dispatch a function across several device
            // backends by op placement; the member names below (sub-functions,
            // parameter->result map, per-placement backend cache) reflect that
            // design, though splitting/execution is not wired up yet.
            class HYBRIDBackend : public runtime::Backend
            {
            public:
                // Create a host tensor wrapping caller-provided memory.
                std::shared_ptr<Tensor> create_tensor(const element::Type& type,
                                                      const Shape& shape,
                                                      void* memory_pointer) override;

                // Create a host tensor with backend-owned storage.
                std::shared_ptr<Tensor> create_tensor(const element::Type& type,
                                                      const Shape& shape) override;

                // Clone `function`, run passes, and cache the result.
                bool compile(std::shared_ptr<Function> function) override;

                // Validate arguments and ensure `function` is compiled.
                bool call(std::shared_ptr<Function> function,
                          const std::vector<std::shared_ptr<Tensor>>& outputs,
                          const std::vector<std::shared_ptr<Tensor>>& inputs) override;

            private:
                // Per-function compilation state: the private clone, its
                // placement-split sub-functions, and the mapping that ties a
                // sub-function's parameters to the producing sub-function's
                // results.
                class FunctionInstance
                {
                public:
                    std::shared_ptr<Function> m_function;
                    std::vector<std::shared_ptr<Function>> m_sub_functions;
                    std::unordered_map<std::shared_ptr<op::Parameter>, std::shared_ptr<op::Result>>
                        m_map_parameter_to_result;
                };

                // Lazily create and cache the backend serving `placement`.
                std::shared_ptr<runtime::Backend> get_cached_backend(Placement placement);

                std::map<Placement, std::shared_ptr<runtime::Backend>> m_cached_backends;
                std::map<std::shared_ptr<Function>, FunctionInstance> m_function_map;
            };
        }
    }
}
This diff is collapsed.
...@@ -65,6 +65,10 @@ if (NGRAPH_INTERPRETER_ENABLE) ...@@ -65,6 +65,10 @@ if (NGRAPH_INTERPRETER_ENABLE)
set(ACTIVE_BACKEND_LIST ${ACTIVE_BACKEND_LIST} INTERPRETER) set(ACTIVE_BACKEND_LIST ${ACTIVE_BACKEND_LIST} INTERPRETER)
endif() endif()
if (NGRAPH_HYBRID_ENABLE)
    # Consistency: the neighboring backend source lists use list(APPEND SRC ...);
    # set(SRC ${SRC} ...) is the legacy equivalent with identical behavior.
    list(APPEND SRC hybrid_backend.cpp)
endif()
if (NGRAPH_CPU_ENABLE) if (NGRAPH_CPU_ENABLE)
list(APPEND SRC core_fusion.cpp quantize_cpu.cpp) list(APPEND SRC core_fusion.cpp quantize_cpu.cpp)
list(APPEND SRC backend_performance.cpp cpu_fusion.cpp cpu_test.cpp cpu_reshape_sinking.cpp) list(APPEND SRC backend_performance.cpp cpu_fusion.cpp cpu_test.cpp cpu_reshape_sinking.cpp)
...@@ -83,6 +87,10 @@ if (NGRAPH_INTELGPU_ENABLE) ...@@ -83,6 +87,10 @@ if (NGRAPH_INTELGPU_ENABLE)
set(ACTIVE_BACKEND_LIST ${ACTIVE_BACKEND_LIST} INTELGPU) set(ACTIVE_BACKEND_LIST ${ACTIVE_BACKEND_LIST} INTELGPU)
endif() endif()
# Register HYBRID in the list of backends the unit tests run against.
if (NGRAPH_HYBRID_ENABLE)
set(ACTIVE_BACKEND_LIST ${ACTIVE_BACKEND_LIST} HYBRID)
endif()
add_subdirectory(models) add_subdirectory(models)
add_subdirectory(files) add_subdirectory(files)
add_subdirectory(util) add_subdirectory(util)
...@@ -179,6 +187,10 @@ if (NGRAPH_INTERPRETER_ENABLE) ...@@ -179,6 +187,10 @@ if (NGRAPH_INTERPRETER_ENABLE)
target_link_libraries(unit-test PRIVATE interpreter_backend) target_link_libraries(unit-test PRIVATE interpreter_backend)
endif() endif()
# Link the hybrid backend library into the unit-test binary when enabled.
if (NGRAPH_HYBRID_ENABLE)
target_link_libraries(unit-test PRIVATE hybrid_backend)
endif()
if (NGRAPH_GPU_ENABLE) if (NGRAPH_GPU_ENABLE)
target_link_libraries(unit-test PRIVATE gpu_backend) target_link_libraries(unit-test PRIVATE gpu_backend)
endif() endif()
......
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment