Commit 9178c15a authored by Mateusz Bencer, committed by Scott Cyphers

[SPEC][FusedOp] Add Mod operator (#3908)

* Mod operator introduced

* Introduced onnx importer, fixed implementation

* styles applied

* Refactored assert comment for mod

* Add failure mod test to plaidml manifest

* Code review remarks introduced

* Changed ops used in decompose to v1

* Moved Mod to op_v1_tbl
parent aeaaf8fb
@@ -377,6 +377,8 @@ set (SRC
op/fused/lstm_sequence.hpp
op/fused/matmul.cpp
op/fused/matmul.hpp
op/fused/mod.cpp
op/fused/mod.hpp
op/fused/mvn.cpp
op/fused/mvn.hpp
op/fused/normalize_l2.cpp
@@ -133,6 +133,8 @@ add_library(onnx_import STATIC
op/mean_variance_normalization.cpp
op/mean_variance_normalization.hpp
op/min.hpp
op/mod.hpp
op/mod.cpp
op/mul.hpp
op/neg.hpp
op/not.hpp
@@ -220,7 +222,7 @@ add_dependencies(onnx_import_interface protobuf::libprotobuf onnx::libonnx onnx:
add_dependencies(onnx_import onnx_import_interface)
set_property(TARGET onnx_import PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(onnx_import
    SYSTEM PRIVATE ${ONNX_IMPORT_INCLUDE_DIR} ${NGRAPH_INCLUDE_PATH}
    SYSTEM PRIVATE ${ONNX_INCLUDE_DIR} ${ONNX_PROTO_INCLUDE_DIR} ${Protobuf_INCLUDE_DIR})
target_link_libraries(onnx_import PRIVATE ${Protobuf_LIBRARIES} ${ONNX_PROTO_LIBRARY})
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <memory>

#include "mod.hpp"
#include "ngraph/frontend/onnx_import/exceptions.hpp"
#include "ngraph/op/abs.hpp"
#include "ngraph/op/fused/mod.hpp"
#include "ngraph/op/util/attr_types.hpp"

namespace ngraph
{
    namespace onnx_import
    {
        namespace op
        {
            namespace set_1
            {
                NodeVector mod(const Node& node)
                {
                    std::shared_ptr<ngraph::Node> dividend{node.get_ng_inputs().at(0)};
                    std::shared_ptr<ngraph::Node> divisor{node.get_ng_inputs().at(1)};

                    std::int64_t fmod = node.get_attribute_value<std::int64_t>("fmod", 0);
                    ASSERT_IS_SUPPORTED(node, fmod == 1)
                        << "Only 'fmod=1' mode is supported for mod operator.";

                    return {std::make_shared<ngraph::op::Mod>(dividend, divisor)};
                }
            } // namespace set_1
        } // namespace op
    } // namespace onnx_import
} // namespace ngraph
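
[Editor's note] ONNX's Mod has two modes selected by the fmod attribute: fmod=1 computes a truncated, C-fmod-style remainder whose sign follows the dividend, while the default fmod=0 computes a floored, Python-style remainder whose sign follows the divisor. Only the former matches the op::Mod decomposition, which is why the importer above rejects fmod=0. A minimal standalone sketch of the two modes (illustrative helper names, not part of the commit):

#include <cstdint>
#include <iostream>

// Truncated remainder (ONNX fmod=1): the result takes the sign of the dividend,
// matching C's fmod and the op::Mod decomposition in this commit.
std::int64_t truncated_mod(std::int64_t a, std::int64_t b)
{
    return a - (a / b) * b; // C++ integer division already truncates toward zero
}

// Floored remainder (ONNX fmod=0, Python-style): the result takes the sign of
// the divisor. This is the mode the importer rejects.
std::int64_t floored_mod(std::int64_t a, std::int64_t b)
{
    std::int64_t r = a % b;
    return (r != 0 && (r < 0) != (b < 0)) ? r + b : r;
}

int main()
{
    std::cout << truncated_mod(-17, 7) << '\n'; // prints -3
    std::cout << floored_mod(-17, 7) << '\n';   // prints 4
    return 0;
}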
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once

#include <memory>

#include "core/node.hpp"
#include "ngraph/node.hpp"

namespace ngraph
{
    namespace onnx_import
    {
        namespace op
        {
            namespace set_1
            {
                NodeVector mod(const Node& node);
            } // namespace set_1
        } // namespace op
    } // namespace onnx_import
} // namespace ngraph
@@ -81,6 +81,7 @@
#include "op/mean.hpp"
#include "op/mean_variance_normalization.hpp"
#include "op/min.hpp"
#include "op/mod.hpp"
#include "op/mul.hpp"
#include "op/neg.hpp"
#include "op/not.hpp"
@@ -299,6 +300,7 @@ namespace ngraph
REGISTER_OPERATOR("MeanVarianceNormalization", 9, mean_variance_normalization);
REGISTER_OPERATOR("Min", 1, min);
REGISTER_OPERATOR("Min", 8, min);
REGISTER_OPERATOR("Mod", 1, mod);
REGISTER_OPERATOR("Mul", 1, mul);
REGISTER_OPERATOR("Mul", 7, mul);
REGISTER_OPERATOR("Neg", 1, neg);
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/fused/mod.hpp"
#include "ngraph/builder/make_constant.hpp"
#include "ngraph/op/abs.hpp"
#include "ngraph/op/convert.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/sign.hpp"
#include "ngraph/op/subtract.hpp"
using namespace std;
using namespace ngraph;

constexpr NodeTypeInfo op::Mod::type_info;

op::Mod::Mod(const Output<Node>& A, const Output<Node>& B, const AutoBroadcastSpec& auto_broadcast)
    : FusedOp({A, B})
    , m_auto_broadcast(auto_broadcast)
{
}

NodeVector op::Mod::decompose_op() const
{
    const auto dividend = make_shared<op::Abs>(input_value(0));
    const auto dividend_sign = make_shared<op::Sign>(input_value(0));
    const auto dividend_et = dividend->get_element_type();
    const auto divisor = make_shared<op::Abs>(input_value(1));

    // truncated(|a| / |b|): the round-trip through i64 drops the fractional part
    auto division = make_shared<op::Convert>(
        make_shared<op::v1::Divide>(dividend, divisor, m_auto_broadcast), ngraph::element::i64);
    division = make_shared<op::Convert>(division, dividend_et);
    // truncated(|a| / |b|) * |b|
    const auto multiplication = make_shared<op::v1::Multiply>(division, divisor, m_auto_broadcast);
    // |a| mod |b| = |a| - truncated(|a| / |b|) * |b|
    const auto mod = make_shared<op::Subtract>(dividend, multiplication, m_auto_broadcast);

    // apply the sign of the dividend to the result
    return {make_shared<op::v1::Multiply>(dividend_sign, mod, m_auto_broadcast)};
}

shared_ptr<Node> op::Mod::copy_with_new_args(const NodeVector& new_args) const
{
    return make_shared<Mod>(new_args.at(0), new_args.at(1), m_auto_broadcast);
}
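
[Editor's note] The decomposition computes a mod b = sign(a) * (|a| - truncated(|a| / |b|) * |b|); the Convert round-trip through element::i64 implements the truncation. For a = -17, b = 7: |a| = 17, truncated(17 / 7) = 2, 17 - 2 * 7 = 3, and the dividend's sign gives -3. A scalar sketch of the same arithmetic (illustrative only, not part of the commit):

#include <cmath>
#include <iostream>

// Scalar mirror of op::Mod::decompose_op():
//   a mod b = sign(a) * (|a| - truncated(|a| / |b|) * |b|)
double scalar_mod(double a, double b)
{
    const double dividend = std::abs(a);                    // op::Abs
    const double dividend_sign = (a > 0) - (a < 0);         // op::Sign
    const double divisor = std::abs(b);                     // op::Abs
    const double division = std::trunc(dividend / divisor); // Divide + Convert round-trip
    const double multiplication = division * divisor;       // op::v1::Multiply
    const double mod = dividend - multiplication;           // op::Subtract
    return dividend_sign * mod;                             // op::v1::Multiply
}

int main()
{
    std::cout << scalar_mod(-17, 7) << '\n'; // -3, as in the model_mod test below
    std::cout << scalar_mod(9, -3) << '\n';  // 0
    return 0;
}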
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once

#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/fused_op.hpp"

namespace ngraph
{
    namespace op
    {
        /// \brief Mod returns the element-wise remainder of division of two given tensors,
        ///        applying multi-directional broadcast rules.
        class Mod : public ngraph::op::util::FusedOp
        {
        public:
            NGRAPH_API
            static constexpr NodeTypeInfo type_info{"Mod", 0};
            const NodeTypeInfo& get_type_info() const override { return type_info; }

            /// \brief Constructs a Mod node.
            ///
            /// \param A Dividend tensor
            /// \param B Divisor tensor
            /// \param auto_broadcast Auto broadcast specification
            Mod(const Output<Node>& A,
                const Output<Node>& B,
                const AutoBroadcastSpec& auto_broadcast = AutoBroadcastType::NUMPY);

            virtual NodeVector decompose_op() const override;
            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

            const AutoBroadcastSpec& get_auto_broadcast() const { return m_auto_broadcast; }

        private:
            AutoBroadcastSpec m_auto_broadcast;
        };
    }
}
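
[Editor's note] A hedged usage sketch showing how the new op might be placed in an nGraph Function, assuming the Parameter/Function API of this era; shapes and the function name are illustrative:

#include "ngraph/ngraph.hpp"

using namespace ngraph;

// Builds a Function computing A mod B element-wise over two int64 vectors.
std::shared_ptr<Function> make_mod_function()
{
    auto A = std::make_shared<op::Parameter>(element::i64, Shape{6});
    auto B = std::make_shared<op::Parameter>(element::i64, Shape{6});
    // NUMPY auto-broadcast is the default third argument
    auto mod = std::make_shared<op::Mod>(A, B);
    return std::make_shared<Function>(NodeVector{mod}, ParameterVector{A, B});
}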
@@ -107,7 +107,7 @@ NGRAPH_OP(MatMul, ngraph::op)
NGRAPH_OP(MaxPool, ngraph::op::v1)
NGRAPH_OP(Maximum, ngraph::op::v1)
NGRAPH_OP(Minimum, ngraph::op::v1)
// NGRAPH_OP(Mod, ngraph::op)
NGRAPH_OP(Mod, ngraph::op)
NGRAPH_OP(Multiply, ngraph::op::v1)
NGRAPH_OP(Negative, ngraph::op)
// NGRAPH_OP(NonMaxSuppression, ngraph::op)
@@ -90,6 +90,7 @@
#include "ngraph/op/fused/lstm_cell.hpp"
#include "ngraph/op/fused/lstm_sequence.hpp"
#include "ngraph/op/fused/matmul.hpp"
#include "ngraph/op/fused/mod.hpp"
#include "ngraph/op/fused/mvn.hpp"
#include "ngraph/op/fused/normalize_l2.hpp"
#include "ngraph/op/fused/partial_slice.hpp"
@@ -1909,6 +1909,7 @@ private:
case OP_TYPEID::MaxPool_v1:
case OP_TYPEID::Maximum_v1:
case OP_TYPEID::Minimum_v1:
case OP_TYPEID::Mod_v1:
case OP_TYPEID::Multiply_v1:
case OP_TYPEID::MVN:
case OP_TYPEID::Negative_v1:
@@ -326,3 +326,6 @@ tile_3d_few_repeats
# dyn shape
dyn_generate_mask
# Test fails on intel gpu mac
model_mod
@@ -2067,6 +2067,12 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
}
break;
}
case OP_TYPEID::Mod_v1:
{
    node = make_shared<op::Mod>(
        args[0], args[1], read_auto_broadcast(node_js, "auto_broadcast"));
    break; // without this break, control would fall through into the Multiply case below
}
case OP_TYPEID::Multiply:
case OP_TYPEID::Multiply_v1:
{
@@ -3828,6 +3833,12 @@ json JSONSerializer::serialize_node(const Node& n)
}
break;
}
case OP_TYPEID::Mod_v1:
{
    auto tmp = static_cast<const op::Mod*>(&n);
    node["auto_broadcast"] = write_auto_broadcast(tmp->get_auto_broadcast());
    break;
}
case OP_TYPEID::Multiply:
case OP_TYPEID::Multiply_v1:
{
ir_version: 5
producer_name: "nGraph ONNX Importer"
graph {
  node {
    input: "A"
    input: "B"
    output: "Y"
    op_type: "Mod"
    attribute {
      name: "fmod"
      i: 1
      type: INT
    }
  }
  name: "test_mod"
  input {
    name: "A"
    type {
      tensor_type {
        elem_type: 7
        shape {
          dim {
            dim_value: 6
          }
        }
      }
    }
  }
  input {
    name: "B"
    type {
      tensor_type {
        elem_type: 7
        shape {
          dim {
            dim_value: 6
          }
        }
      }
    }
  }
  output {
    name: "Y"
    type {
      tensor_type {
        elem_type: 7
        shape {
          dim {
            dim_value: 6
          }
        }
      }
    }
  }
}
opset_import {
  version: 10
}
@@ -1714,3 +1714,16 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, matmul_float_type)
    test_case.add_expected_output<float>(Shape{3, 1}, std::vector<float>{1, 3, 5});
    test_case.run();
}
NGRAPH_TEST(onnx_${BACKEND_NAME}, model_mod)
{
    const auto mod_fn = onnx_import::import_onnx_model(
        file_util::path_join(SERIALIZED_ZOO, "onnx/mod_sign.prototxt"));

    auto test_case = ngraph::test::NgraphTestCase(mod_fn, "${BACKEND_NAME}");
    test_case.add_input<int64_t>({-8, 3, 4, 9, -17, 1});
    test_case.add_input<int64_t>({22, -13, 8, -3, 7, 2});
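    // fmod semantics: each result keeps the sign of its dividend,
    // e.g. -17 fmod 7 = -3 and 9 fmod -3 = 0.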
    test_case.add_expected_output<int64_t>(Shape{6}, {-8, 3, 4, 0, -3, 1});
    test_case.run();
}