Commit 033be233 authored by Ewa Tusień, committed by Scott Cyphers

[ONNX] Remove support for ONNXIFI (#4190)

* Removed ONNXIFI files and all files associated with ONNXIFI support.

* Changed docstrings.
Co-authored-by: Scott Cyphers <diyessi@users.noreply.github.com>
parent d9a9ae69
@@ -182,7 +182,6 @@ endif()
 if (NGRAPH_ONNX_IMPORT_ENABLE)
     option(NGRAPH_USE_SYSTEM_PROTOBUF "Use system provided Protobuf shared object" FALSE)
-    option(NGRAPH_ONNXIFI_ENABLE "Enable ONNX Interface for Framework Integration" TRUE)
 endif()
 if (NOT NGRAPH_JSON_ENABLE)
...
@@ -41,7 +41,6 @@ project/doc-contributor-README.rst @indie
 /src/ngraph/codegen/ @rkimballn1
 /src/ngraph/distributed.* @wenzhe-nrv @diyessi
 /src/ngraph/frontend/fluid/ @silee2
-/src/ngraph/frontend/onnxifi/ @postrational
 /src/ngraph/frontend/onnx_import/ @postrational
 /src/ngraph/op/ @diyessi
 /src/ngraph/op/allreduce.*pp @wenzhe-nrv @diyessi
...
@@ -4,7 +4,7 @@
 One of nGraph’s key features is framework neutrality. We currently support
 popular deep learning frameworks such as TensorFlow and MXNet with stable
 bridges to pass computational graphs to nGraph. Additionally nGraph
-Compiler has functional bridges to PaddlePaddle and PyTorch (via [ONNXIFI]).
+Compiler has a functional bridge to PaddlePaddle.
 For these frameworks, we have successfully tested functionality with a few
 deep learning workloads, and we plan to bring stable support for them in the
 upcoming releases.
@@ -29,7 +29,6 @@ and multi-device support of nGraph Compiler, please refer to [Framework integrat
 | MXNet* 1.3 | :heavy_check_mark: | :heavy_check_mark:
 | ONNX 1.3 | :heavy_check_mark: | :heavy_check_mark:
 | ONNX Runtime | Functional | No
-| PyTorch (via ONNXIFI) | Functional | No
 | PaddlePaddle | Functional | No
@@ -70,7 +69,6 @@ stack, and early adopters will be able test them in 2019.
 [Architecture and features]: ./ABOUT.md
 [Upcoming DL accelerators]: https://www.intel.com/content/dam/www/public/us/en/documents/product-briefs/vision-accelerator-design-product-brief.pdf
 [import it]: https://ngraph.nervanasys.com/docs/latest/core/constructing-graphs/import.html
-[ONNXIFI]: https://github.com/onnx/onnx/blob/master/docs/ONNXIFI.md
 [ONNX Runtime]: https://azure.microsoft.com/en-us/blog/onnx-runtime-is-now-open-source/
 [WinML]: http://docs.microsoft.com/en-us/windows/ai
 [How to]: https://ngraph.nervanasys.com/docs/latest/howto/index.html
...
@@ -16,9 +16,6 @@
 if (NGRAPH_ONNX_IMPORT_ENABLE)
     add_subdirectory(onnx_import)
-    if (NGRAPH_ONNXIFI_ENABLE)
-        add_subdirectory(onnxifi)
-    endif()
 endif()
 option(NGRAPH_FLUID_ENABLE "Enable build for PaddlePaddle Fluid support" ON)
...
@@ -20,7 +20,6 @@ add_library(onnx_import_interface OBJECT
     core/node.cpp
     core/node.hpp
     core/operator_set.hpp
-    core/weight.hpp
     onnx.cpp
     onnx.hpp)
...
@@ -92,7 +92,7 @@ namespace ngraph
     }
 } // namespace detail
-Graph::Graph(const onnx::GraphProto& graph_proto, Model& model, const Weights& weights)
+Graph::Graph(const onnx::GraphProto& graph_proto, Model& model)
     : m_graph_proto{&graph_proto}
     , m_model{&model}
 {
@@ -123,7 +123,7 @@ namespace ngraph
     }
     const auto value_info = m_inputs.back();
-    auto ng_node = value_info.get_ng_node(m_parameters, m_initializers, weights);
+    auto ng_node = value_info.get_ng_node(m_parameters, m_initializers);
     add_provenance_tag_to_input(value_info, ng_node);
     m_ng_node_cache[input.name()] = std::move(ng_node);
 }
...
@@ -25,7 +25,6 @@
 #include "ngraph/op/parameter.hpp"
 #include "operator_set.hpp"
 #include "value_info.hpp"
-#include "weight.hpp"
 namespace ngraph
 {
@@ -34,7 +33,7 @@ namespace ngraph
 class Graph
 {
 public:
-    Graph(const onnx::GraphProto& proto, Model& model, const Weights& weights = {});
+    Graph(const onnx::GraphProto& proto, Model& model);
     const std::vector<Node>& get_nodes() const { return m_nodes; }
     const std::vector<ValueInfo>& get_inputs() const { return m_inputs; }
     const std::vector<ValueInfo>& get_outputs() const { return m_outputs; }
...
@@ -25,7 +25,6 @@
 #include "node.hpp"
 #include "tensor.hpp"
 #include "utils/common.hpp"
-#include "weight.hpp"
 namespace ngraph
 {
@@ -81,22 +80,13 @@ namespace ngraph
 std::shared_ptr<ngraph::Node>
 get_ng_node(ParameterVector& parameters,
-            const std::map<std::string, Tensor>& initializers,
-            const Weights& weights = {}) const
+            const std::map<std::string, Tensor>& initializers) const
 {
     const auto it = initializers.find(get_name());
     if (it != std::end(initializers))
     {
         return get_ng_constant(it->second);
     }
-    else
-    {
-        const auto pt = weights.find(get_name());
-        if (pt != std::end(weights))
-        {
-            return get_ng_constant(pt->second);
-        }
-    }
     parameters.push_back(get_ng_parameter());
     return parameters.back();
 }
@@ -107,11 +97,6 @@ namespace ngraph
     return std::make_shared<op::Parameter>(get_element_type(), get_shape());
 }
-std::shared_ptr<op::Constant> get_ng_constant(const Weight& weight) const
-{
-    return std::make_shared<op::Constant>(weight.type(), weight.shape(), weight.data());
-}
 std::shared_ptr<op::Constant> get_ng_constant(const Tensor& tensor) const
 {
     return tensor.get_ng_constant();
...
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <string>
#include <unordered_map>
#include <vector>
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/tensor.hpp"
namespace ngraph
{
namespace onnx_import
{
/// \brief Weight for an input
class Weight
{
public:
Weight(const Weight&) = default;
Weight& operator=(const Weight&) = delete;
Weight() = delete;
Weight(Weight&&) = default;
Weight& operator=(Weight&&) = delete;
Weight(const element::Type& type, const Shape& shape, std::vector<char> data)
: m_shape{shape}
, m_type{type}
, m_data{std::move(data)}
{
for (const auto& value : m_shape)
{
m_size *= value;
}
}
const Shape& shape() const { return m_shape; }
std::size_t size() const { return m_size; }
const element::Type& type() const { return m_type; }
std::shared_ptr<runtime::Tensor> to_tensor(runtime::Backend& backend)
{
return backend.create_tensor(
m_type, m_shape, reinterpret_cast<void*>(m_data.data()));
}
const void* data() const { return reinterpret_cast<const void*>(m_data.data()); }
private:
Shape m_shape{};
const element::Type& m_type;
std::size_t m_size{1};
std::vector<char> m_data{};
};
using Weights = std::unordered_map<std::string, Weight>;
}
}
@@ -52,7 +52,7 @@ namespace ngraph
     } // namespace error
 } // namespace detail
-std::shared_ptr<Function> import_onnx_model(std::istream& sin, const Weights& weights)
+std::shared_ptr<Function> import_onnx_model(std::istream& sin)
 {
     onnx::ModelProto model_proto;
     // Try parsing input as a binary protobuf message
@@ -70,7 +70,7 @@ namespace ngraph
     }
     Model model{model_proto};
-    Graph graph{model_proto.graph(), model, weights};
+    Graph graph{model_proto.graph(), model};
     auto function = std::make_shared<Function>(
         graph.get_ng_outputs(), graph.get_ng_parameters(), graph.get_name());
     for (std::size_t i{0}; i < function->get_output_size(); ++i)
@@ -80,14 +80,14 @@ namespace ngraph
     return function;
 }
-std::shared_ptr<Function> import_onnx_model(const std::string& path, const Weights& weights)
+std::shared_ptr<Function> import_onnx_model(const std::string& path)
 {
     std::ifstream ifs{path, std::ios::in | std::ios::binary};
     if (!ifs.is_open())
     {
         throw detail::error::file_open{path};
     }
-    return import_onnx_model(ifs, weights);
+    return import_onnx_model(ifs);
 }
 void register_operator(const std::string& name,
...
@@ -22,7 +22,6 @@
 #include <string>
 #include "core/operator_set.hpp"
-#include "core/weight.hpp"
 #include "ngraph/function.hpp"
 namespace ngraph
@@ -70,27 +69,18 @@ namespace ngraph
 /// \brief Convert an ONNX model to nGraph function
 /// The function translated serialized ONNX model to nGraph function. The serialized
 /// ONNX model is read from input stream.
-/// \param sin input stream (e.g. file stream, memory stream, etc),
-/// \param weights weights associated with the model. If weights are embedded into
-/// the model this parameter shall be empty. Having weights in a model
-/// and providing through this parameters is invalid (the weights from
-/// the model will take precedence).
+/// \param sin input stream (e.g. file stream, memory stream, etc)
 /// \return The function returns a nGraph function representing single output from graph.
 NGRAPH_API
-std::shared_ptr<Function> import_onnx_model(std::istream& sin, const Weights& weights = {});
+std::shared_ptr<Function> import_onnx_model(std::istream& sin);
 /// \brief Convert an ONNX model to nGraph functions
 /// The function translated serialized ONNX model to nGraph functions. The ONNX model
 /// is read from ONNX file.
-/// \param filename file name (relative or absolute path name),
-/// \param weights weights associated with the model. If weights are embedded into
-/// the model this parameter shall be empty. Having weights in a model
-/// and providing through this parameters is invalid (the weights from
-/// the model will take precedence).
+/// \param filename file name (relative or absolute path name)
 /// \return The function returns a nGraph function representing single output from graph.
 NGRAPH_API
-std::shared_ptr<Function> import_onnx_model(const std::string& filename,
-                                            const Weights& weights = {});
+std::shared_ptr<Function> import_onnx_model(const std::string& filename);
 } // namespace onnx_import
...
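For reference, a minimal usage sketch of the importer after this change. The header path, model file name, and backend name below are illustrative assumptions, not part of this commit; only the import_onnx_model signatures come from the code above.

// Minimal sketch: import an ONNX model whose weights are embedded as
// initializers (the separate Weights parameter no longer exists).
#include <memory>
#include "ngraph/frontend/onnx_import/onnx.hpp" // assumed include path
#include "ngraph/runtime/backend.hpp"

int main()
{
    // New signature: only a file path (or an std::istream) is accepted.
    std::shared_ptr<ngraph::Function> function =
        ngraph::onnx_import::import_onnx_model("model.onnx");

    // Compile on a registered backend (backend name is illustrative).
    auto backend = ngraph::runtime::Backend::create("CPU");
    auto executable = backend->compile(function);
    return 0;
}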
@@ -501,9 +501,6 @@ if (NGRAPH_ONNX_IMPORT_ENABLE)
     onnx/onnx_import_reshape.in.cpp
     onnx/onnx_import_rnn.in.cpp
     onnx/onnx_import_quant.in.cpp)
-    if (NGRAPH_ONNXIFI_ENABLE)
-        list(APPEND SRC onnx/onnxifi.cpp onnx/onnxifi_span.cpp)
-    endif()
 endif()
 foreach(BACKEND_NAME ${ACTIVE_BACKEND_LIST})
@@ -600,11 +597,6 @@ if (NGRAPH_GPUH_ENABLE)
     target_link_libraries(unit-test PRIVATE gpuh_backend)
 endif()
-if (NGRAPH_ONNXIFI_ENABLE)
-    target_include_directories(unit-test SYSTEM PUBLIC ${ONNX_INCLUDE_DIR})
-    target_link_libraries(unit-test PRIVATE onnxifi-ngraph)
-endif()
 if (NGRAPH_MLIR_ENABLE)
     target_include_directories(unit-test PRIVATE ${CMAKE_BINARY_DIR}/src/contrib/mlir)
 endif()
...
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <cstring>
#include <gtest/gtest.h>
#include <onnx/onnxifi.h>
#include "ngraph/runtime/backend_manager.hpp"
// ===============================================[ onnxGetBackendIDs ] =======
constexpr std::size_t g_default_backend_ids_count{10};
TEST(onnxifi, get_backend_ids)
{
::onnxBackendID backendIDs[g_default_backend_ids_count];
std::size_t count{g_default_backend_ids_count};
::onnxStatus status{::onnxGetBackendIDs(backendIDs, &count)};
EXPECT_TRUE(status == ONNXIFI_STATUS_SUCCESS);
EXPECT_TRUE(count == ngraph::runtime::BackendManager::get_registered_backends().size());
}
TEST(onnxifi, get_backend_ids_buffer_null)
{
std::size_t count{0};
::onnxStatus status{::onnxGetBackendIDs(nullptr, &count)};
EXPECT_TRUE(status == ONNXIFI_STATUS_FALLBACK);
EXPECT_TRUE(count == ngraph::runtime::BackendManager::get_registered_backends().size());
}
TEST(onnxifi, get_backend_ids_count_null)
{
::onnxBackendID backendIDs[g_default_backend_ids_count];
::onnxStatus status{::onnxGetBackendIDs(backendIDs, nullptr)};
EXPECT_TRUE(status == ONNXIFI_STATUS_INVALID_POINTER);
}
TEST(onnxifi, get_backend_ids_null)
{
::onnxStatus status{::onnxGetBackendIDs(nullptr, nullptr)};
EXPECT_TRUE(status == ONNXIFI_STATUS_INVALID_POINTER);
}
TEST(onnxifi, get_backend_ids_consistency_check)
{
::onnxBackendID first_ids[g_default_backend_ids_count];
std::size_t first_count{g_default_backend_ids_count};
EXPECT_TRUE(::onnxGetBackendIDs(first_ids, &first_count) == ONNXIFI_STATUS_SUCCESS);
EXPECT_TRUE(first_count == ngraph::runtime::BackendManager::get_registered_backends().size());
::onnxBackendID second_ids[g_default_backend_ids_count];
std::size_t second_count{g_default_backend_ids_count};
EXPECT_TRUE(::onnxGetBackendIDs(second_ids, &second_count) == ONNXIFI_STATUS_SUCCESS);
EXPECT_TRUE(second_count == ngraph::runtime::BackendManager::get_registered_backends().size());
EXPECT_TRUE(first_count == second_count);
EXPECT_TRUE(std::memcmp(first_ids, second_ids, first_count) == 0);
}
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <gtest/gtest.h>
#include <vector>
#include "ngraph/frontend/onnxifi/span.hpp"
TEST(onnxifi, span)
{
using namespace ngraph::onnxifi;
std::vector<float> floats{0.f, 0.25f, 0.5f, 1.f, 2.f, 3.f, 4.f, 5.5f};
char* buffer{reinterpret_cast<char*>(floats.data())};
Span<float> span{buffer, floats.size()};
for (std::size_t index{0}; index < span.size(); ++index)
{
EXPECT_EQ(span.at(index), floats.at(index));
}
}