Commit a6be6ea3 authored by tsocha, committed by Scott Cyphers

[ONNX] Enable OneHot operation (#2448)

* [ONNX] Enable OneHot operation

* Add UT

* Style check

* Change converts

* Change assert messages

* Update comments

* Update onehot.cpp
parent ee02dab7
...@@ -112,6 +112,8 @@ add_library(onnx_import STATIC
    op/mul.hpp
    op/neg.hpp
    op/not.hpp
    op/onehot.cpp
    op/onehot.hpp
    op/or.hpp
    op/pad.cpp
    op/pad.hpp
......

op/onehot.cpp
//*****************************************************************************
// Copyright 2018-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <cstdint>
#include <memory>
#include "exceptions.hpp"
#include "ngraph/coordinate.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/convert.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/one_hot.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/op/subtract.hpp"
#include "onehot.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
{
    namespace onnx_import
    {
        namespace op
        {
            namespace set_1
            {
                NodeVector onehot(const Node& node)
                {
                    NodeVector inputs{node.get_ng_inputs()};
                    auto indices =
                        std::make_shared<ngraph::op::Convert>(inputs.at(0), element::i64);
                    auto indices_shape = indices->get_shape();
                    auto depth = inputs.at(1);
                    auto values = inputs.at(2);

                    std::shared_ptr<ngraph::Node> off_value =
                        std::make_shared<ngraph::op::Slice>(values, Coordinate{0}, Coordinate{1});
                    std::shared_ptr<ngraph::Node> on_value =
                        std::make_shared<ngraph::op::Slice>(values, Coordinate{1}, Coordinate{2});

                    auto axis = node.get_attribute_value<std::int64_t>("axis", -1);
                    if (axis < 0)
                    {
                        axis += indices_shape.size() + 1;
                    }
                    ASSERT_VALID_ARGUMENT(node, (axis >= 0) && (axis <= indices_shape.size()))
                        << "invalid 'axis' attribute: "
                        << node.get_attribute_value<std::int64_t>("axis", -1);

                    auto constant_depth = std::dynamic_pointer_cast<ngraph::op::Constant>(depth);
                    ASSERT_VALID_ARGUMENT(node, constant_depth)
                        << "Only constant values for depth input are supported for the OneHot "
                           "operator.";
                    std::int64_t depth_value = constant_depth->get_vector<std::int64_t>()[0];

                    auto output_shape = indices_shape;
                    // Insert OneHot axis at the position pointed to by the axis attribute.
                    // Example:
                    //   data_shape   = (2, 2)
                    //   axis         = 1
                    //   depth        = 10
                    //   output_shape = (2, 10, 2)
                    output_shape.insert(std::next(std::begin(output_shape), axis), depth_value);

                    std::shared_ptr<ngraph::Node> one_hot = std::make_shared<ngraph::op::Convert>(
                        std::make_shared<ngraph::op::OneHot>(indices, output_shape, axis),
                        values->get_element_type());
                    on_value = numpy_style_broadcast_for_binary_operation(one_hot, on_value)[1];
                    off_value = numpy_style_broadcast_for_binary_operation(one_hot, off_value)[1];
                    one_hot = one_hot * (on_value - off_value) + off_value;

                    return {one_hot};
                }
            } // namespace set_1
        } // namespace op
    } // namespace onnx_import
} // namespace ngraph
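
A note on the final arithmetic above: ngraph::op::OneHot produces a 0/1 encoding, so the importer applies the affine map one_hot * (on_value - off_value) + off_value after broadcasting the two sliced scalars to the output shape; the expression yields on_value where the encoding is 1 and off_value where it is 0. Below is a minimal stand-alone sketch of the same mapping on plain vectors (all names and numbers are illustrative, not part of the commit):

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    // Illustrative inputs: two indices into a depth-4 axis, with
    // ONNX-style values = [off_value, on_value] = [2, 5].
    std::vector<std::size_t> indices{1, 3};
    const std::size_t depth = 4;
    const double off_value = 2.0;
    const double on_value = 5.0;

    std::vector<double> output;
    for (std::size_t index : indices)
    {
        for (std::size_t d = 0; d < depth; ++d)
        {
            const double one_hot = (d == index) ? 1.0 : 0.0;
            // Same affine mapping as onehot.cpp: 0 -> off_value, 1 -> on_value.
            output.push_back(one_hot * (on_value - off_value) + off_value);
        }
    }

    for (double v : output)
    {
        std::cout << v << ' '; // prints: 2 5 2 2 2 2 2 5
    }
    std::cout << '\n';
}

op/onehot.hpp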
//*****************************************************************************
// Copyright 2018-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
    namespace onnx_import
    {
        namespace op
        {
            namespace set_1
            {
                NodeVector onehot(const Node& node);

            } // namespace set_1
        } // namespace op
    } // namespace onnx_import
} // namespace ngraph
...@@ -71,6 +71,7 @@
#include "op/mul.hpp"
#include "op/neg.hpp"
#include "op/not.hpp"
#include "op/onehot.hpp"
#include "op/or.hpp"
#include "op/pad.hpp"
...@@ -264,6 +265,7 @@ namespace ngraph
                REGISTER_OPERATOR("Neg", 1, neg);
                REGISTER_OPERATOR("Not", 1, logical_not);
                REGISTER_OPERATOR("Or", 1, logical_or);
                REGISTER_OPERATOR("OneHot", 1, onehot);
                REGISTER_OPERATOR("Pad", 1, pad);
                REGISTER_OPERATOR("Pow", 1, pow);
                REGISTER_OPERATOR("PRelu", 1, prelu);
......
[Binary ONNX test fixtures added; the protobuf payloads are not human-readable and are omitted here.]
onnx/one_hot_axis.onnx: graph "test_onehot_with_axis" - inputs indices, depth, values; attribute axis; output y; depth carried as an initializer.
onnx/one_hot_no_axis.onnx: graph "test_onehot_without_axis" - inputs indices, depth, values; output y; depth carried as an initializer.
...@@ -1970,6 +1970,36 @@ TEST(onnx_${BACKEND_NAME}, model_sign)
    EXPECT_TRUE(test::all_close_f(expected_outputs.front(), outputs.front()));
}
TEST(onnx_${BACKEND_NAME}, model_one_hot_with_axis)
{
    auto function = onnx_import::import_onnx_model(
        file_util::path_join(SERIALIZED_ZOO, "onnx/one_hot_axis.onnx"));

    Inputs inputs{{1.0, 9.0, 2.0, 4.0}, {1.0, 3.0}};
    Outputs expected_outputs{{1.0, 1.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                              1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 1.0, 1.0, 1.0, 1.0, 3.0, 1.0, 1.0, 1.0,
                              1.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}};

    Outputs outputs{execute(function, inputs, "${BACKEND_NAME}")};
    EXPECT_TRUE(test::all_close_f(expected_outputs.front(), outputs.front()));
}

TEST(onnx_${BACKEND_NAME}, model_one_hot_without_axis)
{
    auto function = onnx_import::import_onnx_model(
        file_util::path_join(SERIALIZED_ZOO, "onnx/one_hot_no_axis.onnx"));

    std::vector<std::vector<std::int64_t>> inputs{{0, 7, 8}, {2, 5}};
    std::vector<std::vector<std::int64_t>> expected_outputs{{5, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                                                             2, 2, 2, 2, 2, 2, 2, 5, 2, 2, 2, 2,
                                                             2, 2, 2, 2, 2, 2, 2, 2, 5, 2, 2, 2}};

    std::vector<std::vector<std::int64_t>> outputs{execute(function, inputs, "${BACKEND_NAME}")};
    EXPECT_TRUE(test::all_close(expected_outputs.front(), outputs.front()));
}
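
The expected vectors in these tests follow directly from the OneHot semantics. Taking model_one_hot_without_axis: indices are {0, 7, 8} and values {off, on} = {2, 5}; the depth of 12 is inferred here from the 36-element (3 x 12) expected output, since the actual value sits in the binary fixture's initializer; with the default axis of -1 the one-hot dimension is appended last. A small self-contained sanity check under those assumptions:

#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    // Reproduce the expected output of model_one_hot_without_axis by hand.
    // depth = 12 is an inference from the 36-element expected vector,
    // not a value read from the binary .onnx fixture.
    const std::vector<std::int64_t> indices{0, 7, 8};
    const std::int64_t depth = 12;
    const std::int64_t off_value = 2;
    const std::int64_t on_value = 5;

    std::vector<std::int64_t> expected;
    for (std::int64_t index : indices)
    {
        for (std::int64_t d = 0; d < depth; ++d)
        {
            expected.push_back(d == index ? on_value : off_value);
        }
    }

    // on_value lands at flat positions 0, 19 (12 + 7), and 32 (24 + 8),
    // matching the literal vector in the test above.
    assert(expected.size() == 36);
    assert(expected[0] == 5 && expected[19] == 5 && expected[32] == 5);
    return 0;
}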
TEST(onnx_${BACKEND_NAME}, model_where)
{
    auto function =
......