Commit dc329cbc authored by Artur Wojcik's avatar Artur Wojcik Committed by Scott Cyphers

[ONNX] Support for Split op (#1377)

* onnx: add 'constant' operator
Signed-off-by: 's avatarArtur Wojcik <artur.wojcik@intel.com>

* onnx: getting attribute value by name
Signed-off-by: 's avatarArtur Wojcik <artur.wojcik@intel.com>

* onnx: fix code style
Signed-off-by: 's avatarArtur Wojcik <artur.wojcik@intel.com>

* onnx: fix clang compilation warnings
Signed-off-by: 's avatarArtur Wojcik <artur.wojcik@intel.com>

* onnx: exception
Signed-off-by: 's avatarArtur Wojcik <artur.wojcik@intel.com>

* onnx: add 'split' operator
Signed-off-by: 's avatarArtur Wojcik <artur.wojcik@intel.com>

* onnx: add public interface
Signed-off-by: 's avatarArtur Wojcik <artur.wojcik@intel.com>

* onnx: add initial unit test for importer
Signed-off-by: 's avatarArtur Wojcik <artur.wojcik@intel.com>

* onnx: initial implementation of operators' set
Signed-off-by: 's avatarArtur Wojcik <artur.wojcik@intel.com>

* [WIP] Unit test for split operation.

* Fix Split Op bounds calculation + UT

* clang format

* Split Op with variable parts unit test.

* Remove unused headers

* Add missing ngraph install prefix for cmake in travis Dockerfile.

* Remove -Wno-zero-as-null-pointer-constant

* Code review

* Apply clang-format-3.9

* Add missing onnx_import interface files to CMakeList

* Copyright notice format
parent c625abfd
......@@ -8,6 +8,8 @@ RUN apt-get update && apt-get install -y \
clang-format-3.9 \
git \
curl \
protobuf-compiler \
libprotoc-dev \
zlib1g \
zlib1g-dev \
libtinfo-dev && \
......@@ -28,7 +30,10 @@ RUN pip install tox
COPY . /root/ngraph
RUN mkdir /root/ngraph/build
WORKDIR /root/ngraph/build
RUN cmake ../ -DNGRAPH_CPU_ENABLE=FALSE -DNGRAPH_UNIT_TEST_ENABLE=FALSE -DDNGRAPH_TOOLS_ENABLE=FALSE -DNGRAPH_USE_PREBUILT_LLVM=TRUE -DCMAKE_INSTALL_PREFIX="~/ngraph_dist"
RUN cmake .. -DNGRAPH_CPU_ENABLE=FALSE -DNGRAPH_UNIT_TEST_ENABLE=FALSE -DNGRAPH_TOOLS_ENABLE=FALSE \
-DNGRAPH_USE_PREBUILT_LLVM=TRUE -DNGRAPH_ONNX_IMPORT_ENABLE=TRUE -DCMAKE_INSTALL_PREFIX="~/ngraph_dist"
RUN make style-check
RUN make
RUN make install
......
......@@ -23,24 +23,27 @@ if (NOT EXISTS "${CMAKE_CURRENT_BINARY_DIR}/onnx.proto")
endif()
add_custom_command(OUTPUT onnx.pb.cc onnx.pb.h
COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --cpp_out ${CMAKE_CURRENT_BINARY_DIR} --proto_path ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}/onnx.proto
DEPENDS onnx.proto)
COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --cpp_out ${CMAKE_CURRENT_BINARY_DIR} --proto_path ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}/onnx.proto
DEPENDS onnx.proto)
add_library(onnx_import_interface OBJECT
onnx.pb.h
onnx.proto)
onnx.cpp
onnx.hpp
onnx.pb.h
onnx.proto)
add_library(onnx_import STATIC
onnx.pb.cc
attribute.cpp
graph.cpp
model.hpp
node.cpp
op/add.hpp
op/constant.hpp
ops_bridge.cpp
tensor.hpp
value_info.hpp)
onnx.pb.cc
attribute.cpp
graph.cpp
model.hpp
node.cpp
op/add.hpp
op/constant.hpp
op/split.hpp
ops_bridge.cpp
tensor.hpp
value_info.hpp)
add_dependencies(onnx_import onnx_import_interface)
......@@ -53,10 +56,9 @@ target_include_directories(onnx_import_interface PRIVATE ${CMAKE_CURRENT_BINARY_
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "^(Apple)?Clang$")
target_compile_options(onnx_import PRIVATE -Wno-undef -Wno-reserved-id-macro -Wno-switch-enum
-Wno-extended-offsetof -Wno-zero-as-null-pointer-constant -Wno-shorten-64-to-32 -Wno-unused-macros
-Wno-missing-variable-declarations -Wno-unused-private-field)
-Wno-extended-offsetof -Wno-zero-as-null-pointer-constant -Wno-shorten-64-to-32 -Wno-unused-macros
-Wno-missing-variable-declarations -Wno-unused-private-field)
target_compile_options(onnx_import_interface PRIVATE -Wno-undef -Wno-reserved-id-macro -Wno-switch-enum
-Wno-extended-offsetof -Wno-zero-as-null-pointer-constant -Wno-shorten-64-to-32 -Wno-unused-macros
-Wno-missing-variable-declarations -Wno-unused-private-field)
-Wno-extended-offsetof -Wno-zero-as-null-pointer-constant -Wno-shorten-64-to-32 -Wno-unused-macros
-Wno-missing-variable-declarations -Wno-unused-private-field)
endif()
......@@ -16,8 +16,8 @@
#pragma once
#include <onnx.pb.h>
#include "ngraph/except.hpp"
#include "onnx.pb.h"
#include "tensor.hpp"
#define likely(__x) __builtin_expect(!!(__x), 1)
......
......@@ -23,7 +23,7 @@
#include "ngraph/op/parameter_vector.hpp"
#include "onnx.pb.h"
#include <onnx.pb.h>
#include "value_info.hpp"
namespace ngraph
......
......@@ -16,8 +16,8 @@
#pragma once
#include <onnx.pb.h>
#include <ostream>
#include "onnx.pb.h"
namespace ngraph
{
......
......@@ -22,7 +22,7 @@
#include "ngraph/node_vector.hpp"
#include "onnx.pb.h"
#include <onnx.pb.h>
#include "attribute.hpp"
#include "tensor.hpp"
......@@ -58,6 +58,7 @@ namespace ngraph
: m_node_proto{node_proto}
, m_graph{graph}
, m_attributes{std::begin(node_proto.attribute()), std::end(node_proto.attribute())}
, m_output_names{std::begin(node_proto.output()), std::end(node_proto.output())}
{
}
......@@ -73,6 +74,10 @@ namespace ngraph
const std::string& op_type() const { return m_node_proto.op_type(); }
const std::string& get_name() const { return m_node_proto.name(); }
const std::vector<std::reference_wrapper<const std::string>>& get_output_names() const
{
return m_output_names;
}
const std::string& output(int index) const { return m_node_proto.output(index); }
template <typename T>
T get_attribute_value(const std::string& name, T default_value) const
......@@ -106,6 +111,7 @@ namespace ngraph
const onnx::NodeProto& m_node_proto;
const Graph* m_graph;
std::vector<Attribute> m_attributes;
std::vector<std::reference_wrapper<const std::string>> m_output_names;
};
inline std::ostream& operator<<(std::ostream& outs, const Node& node)
......
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <fstream>
#include "ngraph/except.hpp"
#include "graph.hpp"
#include "model.hpp"
#include "node.hpp"
#include "onnx.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace detail
{
// Importer-local exception types; all derive from ngraph_error so callers
// can catch them uniformly.
namespace error
{
// Thrown when the model file at the given path cannot be opened.
struct file_open : ngraph_error
{
explicit file_open(const std::string& path)
: ngraph_error{"failure opening file:" + path}
{
}
};
// Thrown when protobuf fails to parse an ONNX ModelProto from a stream.
struct stream_parse : ngraph_error
{
explicit stream_parse(std::istream&)
: ngraph_error{"failure parsing data from the stream"}
{
}
};
} // namespace error
} // namespace detail
std::vector<std::shared_ptr<Function>> load_onnx_model(std::istream& sin)
{
onnx::ModelProto model_proto;
if (!model_proto.ParseFromIstream(&sin))
{
throw detail::error::stream_parse{sin};
}
std::vector<std::shared_ptr<Function>> output_functions;
Model model{model_proto};
Graph graph{model_proto.graph()};
for (const auto& output : graph.get_outputs())
{
output_functions.emplace_back(std::make_shared<Function>(
graph.get_ng_node_from_cache(output.get_name()), graph.get_ng_parameters()));
}
return output_functions;
}
// Opens the ONNX model file at `path` in binary mode and delegates to the
// stream overload. Throws detail::error::file_open when the file cannot
// be opened.
std::vector<std::shared_ptr<Function>> load_onnx_model(const std::string& path)
{
    std::ifstream model_stream{path, std::ios::in | std::ios::binary};
    if (!model_stream.is_open())
    {
        throw detail::error::file_open{path};
    }
    return load_onnx_model(model_stream);
}
std::shared_ptr<Function> import_onnx_function(std::istream& sin)
{
return load_onnx_model(sin).front();
}
std::shared_ptr<Function> import_onnx_function(const std::string& path)
{
return load_onnx_model(path).front();
}
} // namespace onnx_import
} // namespace ngraph
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <iostream>
#include <string>
#include "ngraph/function.hpp"
namespace ngraph
{
namespace onnx_import
{
// Convert an ONNX model to a vector of nGraph Functions, one per graph
// output (input stream overload).
std::vector<std::shared_ptr<Function>> load_onnx_model(std::istream&);
// Convert an ONNX model file to a vector of nGraph Functions, one per
// graph output.
std::vector<std::shared_ptr<Function>> load_onnx_model(const std::string&);
// Convert the first output of an ONNX model to an nGraph Function
// (input stream overload).
std::shared_ptr<Function> import_onnx_function(std::istream&);
// Convert the first output of an ONNX model file to an nGraph Function.
std::shared_ptr<Function> import_onnx_function(const std::string&);
} // namespace onnx_import
} // namespace ngraph
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include "ngraph/frontend/onnx_import/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/slice.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace error
{
namespace op
{
namespace split
{
namespace detail
{
// Base type for all Split-op import errors; prefixes the message with
// the offending node's name.
struct Error : ngraph_error
{
explicit Error(const std::string& name, const std::string& message)
: ngraph_error{"Split node (" + name + "): " + message}
{
}
};
} // namespace detail
// Thrown when the "axis" attribute does not address a dimension of the
// input tensor.
struct OutOfRange : detail::Error
{
explicit OutOfRange(const std::string& name)
: Error{name,
"provided split axis is out of input tensor dimensions range."}
{
}
};
// Thrown when no "split" attribute is given and the axis length is not
// divisible by the number of requested output parts.
struct Parts : detail::Error
{
explicit Parts(const std::string& name,
std::size_t parts,
std::size_t axis_length)
: Error{name,
"tensor cannot be split into " + std::to_string(parts) +
" equal parts, along axis of length " +
std::to_string(axis_length)}
{
}
};
// Thrown when the explicit "split" attribute lengths do not sum to the
// length of the axis being split.
struct Sum : detail::Error
{
explicit Sum(const std::string& name, std::size_t parts, std::size_t axis)
: Error{name,
"provided lengths of split parts does not sum up to "
"length of axis we split on: " +
std::to_string(parts) + " != " + std::to_string(axis)}
{
}
};
} // namespace split
} // namespace op
} // namespace error
namespace op
{
namespace detail
{
// Clamps `left` to the valid index range [0, right]. A negative `left`
// (for signed T) is treated as an offset from the end, Python-style;
// offsets reaching before index 0 clamp to 0.
template <typename T>
inline T get_valid_array_index(T left, T right)
{
    if (left >= 0)
    {
        return std::min(left, right);
    }
    return std::max(static_cast<T>(0), right + left);
}
// Builds an nGraph Slice selecting [starts[i], ends[i]) along each axis
// in `axes`; every axis not listed keeps its full extent. Start/end
// values are clamped into the axis range by get_valid_array_index.
// Fix: take the index vectors by const reference instead of by value
// (the original copied all three on every call), and hoist the repeated
// node->get_shape() calls out of the loop.
inline std::shared_ptr<ngraph::op::Slice>
    make_ng_slice(const std::shared_ptr<ngraph::Node>& node,
                  const std::vector<std::size_t>& axes,
                  const std::vector<std::size_t>& starts,
                  const std::vector<std::size_t>& ends)
{
    const auto& shape = node->get_shape();
    std::vector<std::size_t> upper_bounds{shape};
    std::vector<std::size_t> lower_bounds(upper_bounds.size());
    for (std::size_t index{0}; index < axes.size(); ++index)
    {
        std::size_t axis{axes.at(index)};
        lower_bounds.at(axis) = get_valid_array_index(starts.at(index), shape.at(axis));
        upper_bounds.at(axis) = get_valid_array_index(ends.at(index), shape.at(axis));
    }
    return std::make_shared<ngraph::op::Slice>(node, lower_bounds, upper_bounds);
}
} // namespace detail
// Implements the ONNX "Split" operator: slices `input` along the axis
// given by the "axis" attribute (default 0; negative values count from
// the back) into one part per declared node output. Part lengths come
// from the optional "split" attribute; without it the axis is divided
// into equal parts.
// Throws error::op::split::OutOfRange, Parts or Sum on invalid inputs.
// Fixes: (1) a very negative axis used to wrap around in the unsigned
// addition and later surface as std::out_of_range — which ops_bridge
// misreports as an unknown operation; (2) the Sum error type existed but
// explicit "split" lengths were never validated against the axis length.
inline NodeVector split(const Node& node, const std::shared_ptr<ngraph::Node>& input)
{
    std::size_t count_outputs{node.get_output_names().size()};
    int64_t axis{node.get_attribute_value<int64_t>("axis", 0)};
    std::size_t axis_to_split{static_cast<std::size_t>(axis)};
    if (axis < 0)
    {
        // Negative axis is an offset from the end of the shape; reject
        // offsets reaching before dimension 0 instead of letting the
        // unsigned addition wrap around.
        if (static_cast<std::size_t>(-axis) > input->get_shape().size())
        {
            throw error::op::split::OutOfRange{node.get_name()};
        }
        axis_to_split = input->get_shape().size() + axis;
    }
    else if (axis_to_split >= input->get_shape().size())
    {
        throw error::op::split::OutOfRange{node.get_name()};
    }
    std::size_t length_axis_to_split{input->get_shape().at(axis_to_split)};
    std::vector<std::size_t> length_parts;
    try
    {
        length_parts = node.get_attribute_value<std::vector<std::size_t>>("split");
    }
    catch (const std::exception&)
    {
        // No "split" attribute provided: divide the axis into equal parts.
        if (length_axis_to_split % count_outputs)
        {
            throw error::op::split::Parts{
                node.get_name(), count_outputs, length_axis_to_split};
        }
        length_parts.assign(count_outputs, length_axis_to_split / count_outputs);
    }
    // The part lengths must cover the split axis exactly (this check is
    // done outside the try block so the Sum error is not swallowed by
    // the catch above).
    std::size_t sum_of_parts{0};
    for (const auto& length_part : length_parts)
    {
        sum_of_parts += length_part;
    }
    if (sum_of_parts != length_axis_to_split)
    {
        throw error::op::split::Sum{node.get_name(), sum_of_parts, length_axis_to_split};
    }
    std::size_t start_index{0};
    NodeVector outputs;
    for (const auto& length_part : length_parts)
    {
        std::size_t end_index{start_index + length_part};
        outputs.push_back(
            detail::make_ng_slice(input, {axis_to_split}, {start_index}, {end_index}));
        start_index = end_index;
    }
    return outputs;
}
} // namespace op
} // namespace onnx_import
} // namespace ngraph
......@@ -20,6 +20,7 @@
#include "attribute.hpp"
#include "ngraph/frontend/onnx_import/op/add.hpp"
#include "ngraph/frontend/onnx_import/op/constant.hpp"
#include "ngraph/frontend/onnx_import/op/split.hpp"
#include "ops_bridge.hpp"
namespace ngraph
......@@ -46,6 +47,11 @@ namespace ngraph
return {op::constant(node.get_attribute_value<Tensor>("value"))};
}
NodeVector split(const Node& node)
{
return op::split(node, node.get_ng_inputs().at(0));
}
class ops_bridge
{
public:
......@@ -72,6 +78,7 @@ namespace ngraph
{
m_map.emplace("Add", std::bind(add, std::placeholders::_1));
m_map.emplace("Constant", std::bind(constant, std::placeholders::_1));
m_map.emplace("Split", std::bind(split, std::placeholders::_1));
}
NodeVector operator()(const Node& node) const
......@@ -80,7 +87,7 @@ namespace ngraph
{
return m_map.at(node.op_type())(node);
}
catch (const std::exception&)
catch (const std::out_of_range&)
{
throw detail::error::unknown_operation{node.op_type()};
}
......
......@@ -21,7 +21,7 @@
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"
#include "onnx.pb.h"
#include <onnx.pb.h>
namespace ngraph
{
......
......@@ -23,7 +23,7 @@
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"
#include "onnx.pb.h"
#include <onnx.pb.h>
#include "node.hpp"
#include "tensor.hpp"
......
......@@ -47,6 +47,10 @@ set(SRC
zero_dim_tensor_elimination.cpp
)
if (NGRAPH_ONNX_IMPORT_ENABLE)
list(APPEND SRC onnx_import.cpp)
endif()
if (NGRAPH_INTERPRETER_ENABLE)
set(SRC ${SRC} backend_debug_api.cpp builder.cpp backend_api.cpp)
endif()
......@@ -118,6 +122,10 @@ target_link_libraries(unit-test ngraph_test_util)
target_link_libraries(unit-test ngraph libgtest libjson pthread)
target_link_libraries(unit-test ${CMAKE_DL_LIBS})
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "^(Apple)?Clang$")
target_compile_options(unit-test PRIVATE -Wno-undef -Wno-reserved-id-macro)
endif()
if (NGRAPH_CPU_ENABLE)
# The INTERPRETER backend is required for graph_partition, convolution, and backwards unit tests
target_link_libraries(unit-test cpu_backend interpreter_backend)
......
ngraph ONNXImporter:

A
BX add_node1"Add

X
CY add_node2"Add
test_graphZ
A

Z
B

Z
C

b
Y

B
\ No newline at end of file
 backend-test:
/
inputoutput_1output_2"Split*
axistest_split_equal_parts_2dZ
input


b
output_1


b
output_2


B
\ No newline at end of file
 backend-test:
,
inputoutput_1output_2output_3"Split#test_split_equal_parts_default_axisZ
input

b
output_1

b
output_2

b
output_3

B
\ No newline at end of file
 backend-test:
?
inputoutput_1output_2"Split*
axis*
split@@test_split_variable_parts_2dZ
input


b
output_1


b
output_2


B
\ No newline at end of file
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <fstream>
#include <sstream>
#include "gtest/gtest.h"
#include "ngraph/frontend/onnx_import/onnx.hpp"
#include "ngraph/ngraph.hpp"
#include "util/test_tools.hpp"
// Imports onnx/add_abc.onnx and evaluates it on the CPU backend with
// scalar f32 inputs 1, 2, 3, expecting the single output 6.
// NOTE(review): assumes the fixture graph computes A + B + C — confirm
// against the add_abc.onnx model file.
TEST(onnx, model_add_abc)
{
auto model{ngraph::onnx_import::load_onnx_model(
ngraph::file_util::path_join(SERIALIZED_ZOO, "onnx/add_abc.onnx"))};
auto backend{ngraph::runtime::Backend::create("CPU")};
// Single-element rank-1 tensors for all three inputs and the result.
ngraph::Shape shape{1};
auto a{backend->create_tensor(ngraph::element::f32, shape)};
copy_data(a, std::vector<float>{1});
auto b{backend->create_tensor(ngraph::element::f32, shape)};
copy_data(b, std::vector<float>{2});
auto c{backend->create_tensor(ngraph::element::f32, shape)};
copy_data(c, std::vector<float>{3});
auto r{backend->create_tensor(ngraph::element::f32, shape)};
// The model yields one Function; evaluate it, writing the result into r.
backend->call(model.front(), {r}, {a, b, c});
EXPECT_EQ((std::vector<float>{6}), read_vector<float>(r));
}
// Imports onnx/add_abc_initializers.onnx, where some addends are baked
// into the model as initializers, so only tensor C is supplied at call
// time. NOTE(review): with C = {1,2,3,4} the expected result {3,6,9,12}
// implies the initializers contribute {2,4,6,8} — confirm against the
// fixture.
TEST(onnx, model_add_abc_initializers)
{
auto model{ngraph::onnx_import::load_onnx_model(
ngraph::file_util::path_join(SERIALIZED_ZOO, "onnx/add_abc_initializers.onnx"))};
auto backend{ngraph::runtime::Backend::create("CPU")};
// 2x2 f32 tensor is the only runtime input.
ngraph::Shape shape{2, 2};
auto c{backend->create_tensor(ngraph::element::f32, shape)};
copy_data(c, std::vector<float>{1, 2, 3, 4});
auto r{backend->create_tensor(ngraph::element::f32, shape)};
backend->call(model.front(), {r}, {c});
EXPECT_EQ((std::vector<float>{3, 6, 9, 12}), read_vector<float>(r));
}
// Splits a six-element 1-D tensor into three equal parts (no "split"
// attribute, default axis 0). The importer yields one Function per ONNX
// output, so each part is checked via its own Function.
TEST(onnx, model_split_equal_parts_default)
{
    auto functions{ngraph::onnx_import::load_onnx_model(
        ngraph::file_util::path_join(SERIALIZED_ZOO, "onnx/split_equal_parts_default.onnx"))};
    auto inputs = std::vector<std::vector<float>>{{1, 2, 3, 4, 5, 6}};
    auto expected = std::vector<std::vector<float>>{{1, 2}, {3, 4}, {5, 6}};
    for (std::size_t part = 0; part < expected.size(); ++part)
    {
        auto results = execute(functions[part], inputs, "CPU");
        EXPECT_EQ(results.size(), 1);
        EXPECT_EQ(expected[part], results.front());
    }
}
// Splits a 12-element 2-D tensor into two equal parts along axis=1; each
// Function's output is compared as a flattened row-major vector.
TEST(onnx, model_split_equal_parts_2d)
{
    auto functions{ngraph::onnx_import::load_onnx_model(
        ngraph::file_util::path_join(SERIALIZED_ZOO, "onnx/split_equal_parts_2d.onnx"))};
    auto inputs = std::vector<std::vector<float>>{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}};
    auto expected =
        std::vector<std::vector<float>>{{0, 1, 2, 6, 7, 8}, {3, 4, 5, 9, 10, 11}};
    for (std::size_t part = 0; part < expected.size(); ++part)
    {
        auto results = execute(functions[part], inputs, "CPU");
        EXPECT_EQ(results.size(), 1);
        EXPECT_EQ(expected[part], results[0]);
    }
}
// Splits a 12-element 2-D tensor into variable parts {2, 4} along axis=1
// via the explicit "split" attribute; outputs compared as flattened
// row-major vectors.
TEST(onnx, model_split_variable_parts_2d)
{
    auto functions{ngraph::onnx_import::load_onnx_model(
        ngraph::file_util::path_join(SERIALIZED_ZOO, "onnx/split_variable_parts_2d.onnx"))};
    auto inputs = std::vector<std::vector<float>>{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}};
    auto expected =
        std::vector<std::vector<float>>{{0, 1, 6, 7}, {2, 3, 4, 5, 8, 9, 10, 11}};
    for (std::size_t part = 0; part < expected.size(); ++part)
    {
        auto results = execute(functions[part], inputs, "CPU");
        EXPECT_EQ(results.size(), 1);
        EXPECT_EQ(expected[part], results[0]);
    }
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment