Commit 8925436a authored by Tomasz Dołbniak, committed by Scott Cyphers

[SPEC] ReduceLogicalAnd & ReduceLogicalOr (#3874)

* ReduceLogicalAnd op implementation

* ReduceLogicalOr op implementation

* Add basic constant folding support

* Fix typo

* Revert "Add basic constant folding support"

This reverts commit 5d14a1849e957858dd5f6615981b154a381a1127.

* Introduce and use a new base class for logical reductions

* Constant folding for v1::ReduceLogicalAnd

* Constant folding for v1::ReduceLogicalOr

* Obsolete cout removal
parent f2a8f6e5
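
Before the diff, a minimal usage sketch of the two new v1 ops (the shapes, values, and variable names here are illustrative; the constructor signatures match the headers added below):

#include "ngraph/op/constant.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/reduce_logical_and.hpp"
#include "ngraph/op/reduce_logical_or.hpp"

using namespace ngraph;

// A boolean 3x3 input, reduced over axis 1 (i.e. across each row).
const auto data = std::make_shared<op::Parameter>(element::boolean, Shape{3, 3});
const auto axes = op::Constant::create(element::i64, Shape{1}, {1});

// keep_dims = false: the reduced axis is dropped -> output shape {3}.
const auto rows_all = std::make_shared<op::v1::ReduceLogicalAnd>(data, axes, false);

// keep_dims = true: the reduced axis is kept with dimension 1 -> output shape {3, 1}.
const auto rows_any = std::make_shared<op::v1::ReduceLogicalOr>(data, axes, true);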
@@ -273,6 +273,10 @@ set (SRC
op/power.hpp
op/product.cpp
op/product.hpp
op/reduce_logical_and.cpp
op/reduce_logical_and.hpp
op/reduce_logical_or.cpp
op/reduce_logical_or.hpp
op/reduce_prod.cpp
op/reduce_prod.hpp
op/reduce_mean.cpp
@@ -431,6 +435,8 @@ set (SRC
op/util/fused_op.hpp
op/util/index_reduction.cpp
op/util/index_reduction.hpp
op/util/logical_reduction_keep_dims.hpp
op/util/logical_reduction_keep_dims.cpp
op/util/logical_reduction.cpp
op/util/logical_reduction.hpp
op/util/rnn_cell_base.cpp
......
@@ -183,6 +183,8 @@ NGRAPH_OP(Range, ngraph::op, 0)
NGRAPH_OP(Reciprocal, ngraph::op, 0)
NGRAPH_OP(Recv, ngraph::op::v0, 0)
NGRAPH_OP(ReduceMax, ngraph::op::v1, 1)
NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1, 1)
NGRAPH_OP(ReduceLogicalOr, ngraph::op::v1, 1)
NGRAPH_OP(ReduceMean, ngraph::op::v1, 1)
NGRAPH_OP(ReduceMin, ngraph::op::v1, 1)
NGRAPH_OP(ReduceProd, ngraph::op::v1, 1)
......
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/reduce_logical_and.hpp"
using namespace ngraph;
using namespace std;
constexpr NodeTypeInfo op::v1::ReduceLogicalAnd::type_info;
op::v1::ReduceLogicalAnd::ReduceLogicalAnd(const Output<Node>& data,
const Output<Node>& reduction_axes,
const bool keep_dims)
: LogicalReductionKeepDims(data, reduction_axes, keep_dims)
{
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v1::ReduceLogicalAnd::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<op::v1::ReduceLogicalAnd>(new_args.at(0), new_args.at(1), get_keep_dims());
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/util/logical_reduction_keep_dims.hpp"
namespace ngraph
{
namespace op
{
namespace v1
{
/// \brief Performs a reduction using "logical and"
///
/// The reduction is performed over slices of the first input. The shape of those
/// slices depends on the values passed to the second input (the reduction axes).
class ReduceLogicalAnd : public util::LogicalReductionKeepDims
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"ReduceLogicalAnd", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
ReduceLogicalAnd() = default;
/// \brief Constructs a ReduceLogicalAnd node.
///
/// \param data - The input tensor with data to be reduced
/// \param reduction_axes - The input tensor indicating the axes over which
/// the first tensor is sliced prior to the reduction
/// \param keep_dims - Indicates whether the reduced axes should be kept in the
/// output shape, each with dimension 1
ReduceLogicalAnd(const Output<Node>& data,
const Output<Node>& reduction_axes,
const bool keep_dims = false);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
}
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/reduce_logical_or.hpp"
using namespace ngraph;
using namespace std;
constexpr NodeTypeInfo op::v1::ReduceLogicalOr::type_info;
op::v1::ReduceLogicalOr::ReduceLogicalOr(const Output<Node>& data,
const Output<Node>& reduction_axes,
const bool keep_dims)
: LogicalReductionKeepDims(data, reduction_axes, keep_dims)
{
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::v1::ReduceLogicalOr::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<op::v1::ReduceLogicalOr>(new_args.at(0), new_args.at(1), get_keep_dims());
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/util/logical_reduction_keep_dims.hpp"
namespace ngraph
{
namespace op
{
namespace v1
{
/// \brief Performs a reduction using "logical or"
///
/// The reduction is performed over slices of the first input. The shape of those
/// slices depends on the values passed to the second input (the reduction axes).
class ReduceLogicalOr : public util::LogicalReductionKeepDims
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"ReduceLogicalOr", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
ReduceLogicalOr() = default;
/// \brief Constructs a ReduceLogicalOr node.
///
/// \param data - The input tensor with data to be reduced
/// \param reduction_axes - The input tensor indicating the axes over which
/// the first tensor is sliced prior to the reduction
/// \param keep_dims - Indicates whether the reduced axes should be kept in the
/// output shape, each with dimension 1
ReduceLogicalOr(const Output<Node>& data,
const Output<Node>& reduction_axes,
const bool keep_dims = false);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
}
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/util/logical_reduction_keep_dims.hpp"
#include "ngraph/op/constant.hpp"
using namespace std;
using namespace ngraph;
op::util::LogicalReductionKeepDims::LogicalReductionKeepDims(
const ngraph::Output<ngraph::Node>& arg,
const ngraph::Output<ngraph::Node>& reduction_axes,
const bool keep_dims)
: LogicalReduction(arg, reduction_axes)
, m_keep_dims{keep_dims}
{
}
void op::util::LogicalReductionKeepDims::validate_and_infer_types()
{
if (m_keep_dims)
{
const auto reduction_axes = get_reduction_axes();
const auto input_shape = get_input_partial_shape(0);
const auto input_rank = input_shape.rank();
PartialShape result_shape{PartialShape::dynamic()};
if (input_rank.is_static())
{
result_shape = PartialShape::dynamic(input_rank);
}
if (input_rank.is_static() && reduction_axes_constant())
{
std::vector<Dimension> dims;
for (const auto axis : reduction_axes)
{
NODE_VALIDATION_CHECK(this,
axis < size_t(input_rank),
"Reduction axis (",
axis,
") is out of bounds ",
"(argument shape: ",
input_shape,
", reduction axes: ",
reduction_axes,
")");
}
for (size_t i = 0; i < size_t(input_rank); i++)
{
if (reduction_axes.count(i) == 0)
{
dims.push_back(input_shape[i]);
}
else
{
dims.push_back(Dimension{1});
}
}
result_shape = PartialShape(dims);
}
set_input_is_relevant_to_shape(1);
set_output_type(0, get_input_element_type(0), result_shape);
}
else
{
LogicalReduction::validate_and_infer_types();
}
}
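
To make the branches above concrete, a small sketch (the shapes and the choice of ReduceLogicalAnd as the concrete subclass are assumptions for illustration):

#include "ngraph/op/constant.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/reduce_logical_and.hpp"

using namespace ngraph;

// Static input {2, 3, 4}, constant reduction axes {0, 2}, keep_dims = true:
const auto arg = std::make_shared<op::Parameter>(element::boolean, Shape{2, 3, 4});
const auto axes = op::Constant::create(element::i64, Shape{2}, {0, 2});
const auto node = std::make_shared<op::v1::ReduceLogicalAnd>(arg, axes, true);
// node->get_output_shape(0) == Shape{1, 3, 1}; with keep_dims = false the base-class
// path would drop the reduced axes and give Shape{3}. A dynamic input rank keeps the
// result fully dynamic; a static rank with non-constant axes yields
// PartialShape::dynamic(input_rank).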
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/logical_reduction.hpp"
namespace ngraph
{
namespace op
{
namespace util
{
class NGRAPH_API LogicalReductionKeepDims : public util::LogicalReduction
{
protected:
LogicalReductionKeepDims() = default;
/// \param arg The tensor to be reduced.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
/// \param keep_dims If set to true, the reduced axes are retained in the
/// output shape, each with dimension 1.
LogicalReductionKeepDims(const Output<Node>& arg,
const Output<Node>& reduction_axes,
const bool keep_dims = false);
public:
void validate_and_infer_types() override;
/// \return true if the reduced axes are retained in the output shape.
/// For each such axis, the output dimension is equal to 1.
bool get_keep_dims() const { return m_keep_dims; }
void set_keep_dims(bool keep_dims) { m_keep_dims = keep_dims; }
private:
bool m_keep_dims = false;
};
}
}
}
@@ -145,6 +145,8 @@
#include "ngraph/op/quantized_convolution.hpp"
#include "ngraph/op/quantized_dot.hpp"
#include "ngraph/op/recv.hpp"
#include "ngraph/op/reduce_logical_and.hpp"
#include "ngraph/op/reduce_logical_or.hpp"
#include "ngraph/op/reduce_mean.hpp"
#include "ngraph/op/reduce_prod.hpp"
#include "ngraph/op/reduce_sum.hpp"
......
@@ -118,6 +118,8 @@ NGRAPH_OP(Proposal, ngraph::op::v0)
NGRAPH_OP(Range, ngraph::op::v0)
NGRAPH_OP(Relu, ngraph::op::v0)
NGRAPH_OP(ReduceMax, ngraph::op::v1)
NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1)
NGRAPH_OP(ReduceLogicalOr, ngraph::op::v1)
NGRAPH_OP(ReduceMean, ngraph::op::v1)
NGRAPH_OP(ReduceMin, ngraph::op::v1)
NGRAPH_OP(ReduceProd, ngraph::op::v1)
......
@@ -17,12 +17,29 @@
#include "constant_folding.hpp"
#include "ngraph/op/all.hpp"
#include "ngraph/op/any.hpp"
#include "ngraph/op/reduce_logical_and.hpp"
#include "ngraph/op/reduce_logical_or.hpp"
#include "ngraph/runtime/reference/all.hpp"
#include "ngraph/runtime/reference/any.hpp"
using namespace std;
using namespace ngraph;
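// Builds the output shape with the reduced axes dropped,
// e.g. axes {0, 2} applied to an input shape {2, 3, 4} yield {3}.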
static Shape get_shape_no_keep_dims(const AxisSet& reduction_axes, const Shape& input_shape)
{
Shape shape_no_keep_dims;
for (size_t i = 0; i < input_shape.size(); i++)
{
if (reduction_axes.count(i) == 0)
{
shape_no_keep_dims.push_back(input_shape[i]);
}
}
return shape_no_keep_dims;
}
static shared_ptr<op::Constant> fold_constant_logical_reduction(shared_ptr<op::Constant> constant,
shared_ptr<Node> reduction_node)
{
@@ -44,6 +61,28 @@ static shared_ptr<op::Constant> fold_constant_logical_reduction(shared_ptr<op::C
reduction_node->get_shape(),
any->get_reduction_axes());
}
else if (auto reduce_and = as_type_ptr<::ngraph::op::v1::ReduceLogicalAnd>(reduction_node))
{
const auto reduction_axes = reduce_and->get_reduction_axes();
const auto input_shape = reduce_and->get_input_shape(0);
runtime::reference::all(constant->get_vector<char>().data(),
out_vec.data(),
constant->get_output_shape(0),
get_shape_no_keep_dims(reduction_axes, input_shape),
reduction_axes);
}
else if (auto reduce_or = as_type_ptr<::ngraph::op::v1::ReduceLogicalOr>(reduction_node))
{
const auto reduction_axes = reduce_or->get_reduction_axes();
const auto input_shape = reduce_or->get_input_shape(0);
runtime::reference::any(constant->get_vector<char>().data(),
out_vec.data(),
constant->get_output_shape(0),
get_shape_no_keep_dims(reduction_axes, input_shape),
reduction_axes);
}
else
{
NGRAPH_CHECK(false,
@@ -64,7 +103,9 @@ void pass::ConstantFolding::construct_constant_logical_reduction()
make_shared<pattern::op::Label>(element::i64, Shape{2}, pattern::has_class<op::Constant>());
auto is_supported_reduction = [](std::shared_ptr<Node> n) {
return (pattern::has_class<::ngraph::op::All>()(n) ||
pattern::has_class<::ngraph::op::Any>()(n) ||
pattern::has_class<::ngraph::op::v1::ReduceLogicalAnd>()(n) ||
pattern::has_class<::ngraph::op::v1::ReduceLogicalOr>()(n));
};
auto reduction =
std::make_shared<pattern::op::Any>(element::i32,
......
@@ -2389,6 +2389,18 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
break;
}
case OP_TYPEID::ReduceLogicalAnd_v1:
{
const auto keep_dims = node_js.at("keep_dims").get<bool>();
node = make_shared<op::v1::ReduceLogicalAnd>(args[0], args[1], keep_dims);
break;
}
case OP_TYPEID::ReduceLogicalOr_v1:
{
const auto keep_dims = node_js.at("keep_dims").get<bool>();
node = make_shared<op::v1::ReduceLogicalOr>(args[0], args[1], keep_dims);
break;
}
case OP_TYPEID::Relu:
{
node = make_shared<op::Relu>(args[0]);
@@ -4060,12 +4072,6 @@ json JSONSerializer::serialize_node(const Node& n)
node["output_axes"] = tmp->get_output_axes();
break;
}
case OP_TYPEID::RandomUniform:
{
auto tmp = static_cast<const op::RandomUniform*>(&n);
@@ -4076,6 +4082,24 @@ json JSONSerializer::serialize_node(const Node& n)
}
case OP_TYPEID::Reciprocal: { break;
}
case OP_TYPEID::Recv:
{
auto tmp = static_cast<const op::Recv*>(&n);
node["source_id"] = tmp->get_src_id();
break;
}
case OP_TYPEID::ReduceLogicalAnd_v1:
{
const auto tmp = static_cast<const op::v1::ReduceLogicalAnd*>(&n);
node["keep_dims"] = tmp->get_keep_dims();
break;
}
case OP_TYPEID::ReduceLogicalOr_v1:
{
const auto tmp = static_cast<const op::v1::ReduceLogicalOr*>(&n);
node["keep_dims"] = tmp->get_keep_dims();
break;
}
case OP_TYPEID::ReduceMean_v1:
{
auto tmp = static_cast<const op::v1::ReduceMean*>(&n);
......
@@ -1047,6 +1047,98 @@ TEST(constant_folding, const_all)
ASSERT_EQ(values_expected, values_out);
}
TEST(constant_folding, const_reduce_logical_and__no_keepdims)
{
const Shape input_shape{3, 3};
const vector<char> values_in{0, 1, 1, 0, 1, 0, 1, 1, 1};
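// Reducing over axis 1 ANDs each row: {0, 1, 1} -> 0, {0, 1, 0} -> 0, {1, 1, 1} -> 1.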
const auto data = op::Constant::create(element::boolean, input_shape, values_in);
const auto axes = op::Constant::create(element::i64, {1}, {1});
const auto reduction = make_shared<op::v1::ReduceLogicalAnd>(data, axes, false);
auto f = make_shared<Function>(reduction, ParameterVector{});
pass::Manager pass_manager;
pass_manager.register_pass<pass::ConstantFolding>();
pass_manager.run_passes(f);
ASSERT_EQ(count_ops_of_type<op::v1::ReduceLogicalAnd>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
const auto new_const = as_type_ptr<op::Constant>(f->get_results().at(0)->get_argument(0));
ASSERT_TRUE(new_const);
const Shape expected_out_shape{3};
ASSERT_EQ(new_const->get_shape(), expected_out_shape);
const auto values_out = new_const->get_vector<char>();
const vector<char> values_expected{0, 0, 1};
ASSERT_EQ(values_expected, values_out);
}
TEST(constant_folding, const_reduce_logical_and__keepdims)
{
const Shape input_shape{3, 3};
const vector<char> values_in{0, 1, 1, 0, 1, 0, 1, 1, 1};
const auto data = op::Constant::create(element::boolean, input_shape, values_in);
const auto axes = op::Constant::create(element::i64, {1}, {1});
const auto reduction = make_shared<op::v1::ReduceLogicalAnd>(data, axes, true);
auto f = make_shared<Function>(reduction, ParameterVector{});
pass::Manager pass_manager;
pass_manager.register_pass<pass::ConstantFolding>();
pass_manager.run_passes(f);
ASSERT_EQ(count_ops_of_type<op::v1::ReduceLogicalAnd>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
const auto new_const = as_type_ptr<op::Constant>(f->get_results().at(0)->get_argument(0));
ASSERT_TRUE(new_const);
// with the keep_dims attribute of ReduceLogicalAnd set to true, the output shape is
// expected to have 'ones' at the positions specified by the reduction axes
const Shape expected_out_shape{3, 1};
ASSERT_EQ(new_const->get_shape(), expected_out_shape);
const auto values_out = new_const->get_vector<char>();
const vector<char> values_expected{0, 0, 1};
ASSERT_EQ(values_expected, values_out);
}
TEST(constant_folding, const_reduce_logical_and__keepdims_3d)
{
const Shape input_shape{2, 2, 2};
const vector<char> values_in{1, 1, 0, 0, 1, 0, 0, 1};
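// Element (i, j, k) is stored at index i*4 + j*2 + k; reducing over axes {0, 2} ANDs
// the four elements that share j: j=0 -> 1 & 1 & 1 & 0 = 0, j=1 -> 0 & 0 & 0 & 1 = 0.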
const auto data = op::Constant::create(element::boolean, input_shape, values_in);
const auto axes = op::Constant::create(element::i64, {2}, {0, 2});
const auto reduction = make_shared<op::v1::ReduceLogicalAnd>(data, axes, true);
auto f = make_shared<Function>(reduction, ParameterVector{});
pass::Manager pass_manager;
pass_manager.register_pass<pass::ConstantFolding>();
pass_manager.run_passes(f);
ASSERT_EQ(count_ops_of_type<op::v1::ReduceLogicalAnd>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
const auto new_const = as_type_ptr<op::Constant>(f->get_results().at(0)->get_argument(0));
ASSERT_TRUE(new_const);
const Shape expected_out_shape{1, 2, 1};
ASSERT_EQ(new_const->get_shape(), expected_out_shape);
const auto values_out = new_const->get_vector<char>();
const vector<char> values_expected{0, 0};
ASSERT_EQ(values_expected, values_out);
}
TEST(constant_folding, const_any)
{
Shape input_shape{3, 3};
@@ -1072,6 +1164,36 @@ TEST(constant_folding, const_any)
ASSERT_EQ(values_expected, values_out);
}
TEST(constant_folding, const_reduce_logical_or__no_keepdims)
{
const Shape input_shape{3, 3};
const vector<char> values_in{1, 0, 0, 1, 0, 1, 0, 0, 0};
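// Reducing over axis 1 ORs each row: {1, 0, 0} -> 1, {1, 0, 1} -> 1, {0, 0, 0} -> 0.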
const auto data = op::Constant::create(element::boolean, input_shape, values_in);
const auto axes = op::Constant::create(element::i64, {1}, {1});
const auto reduction = make_shared<op::v1::ReduceLogicalOr>(data, axes, false);
auto f = make_shared<Function>(reduction, ParameterVector{});
pass::Manager pass_manager;
pass_manager.register_pass<pass::ConstantFolding>();
pass_manager.run_passes(f);
ASSERT_EQ(count_ops_of_type<op::v1::ReduceLogicalOr>(f), 0);
ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
const auto new_const = as_type_ptr<op::Constant>(f->get_results().at(0)->get_argument(0));
ASSERT_TRUE(new_const);
const Shape expected_out_shape{3};
ASSERT_EQ(new_const->get_shape(), expected_out_shape);
const auto values_out = new_const->get_vector<char>();
const vector<char> values_expected{1, 1, 0};
ASSERT_EQ(values_expected, values_out);
}
TEST(constant_folding, const_concat)
{
auto constant0 =
......
@@ -102,8 +102,8 @@ TEST(opset, check_opset1)
CHECK_OPSET(op::v0::PriorBoxClustered, opset1::PriorBoxClustered)
CHECK_OPSET(op::v0::Proposal, opset1::Proposal)
CHECK_OPSET(op::v0::PSROIPooling, opset1::PSROIPooling)
CHECK_OPSET(op::v1::ReduceLogicalAnd, opset1::ReduceLogicalAnd)
CHECK_OPSET(op::v1::ReduceLogicalOr, opset1::ReduceLogicalOr)
CHECK_OPSET(op::v1::ReduceMax, opset1::ReduceMax)
CHECK_OPSET(op::v1::ReduceMean, opset1::ReduceMean)
CHECK_OPSET(op::v1::ReduceMin, opset1::ReduceMin)
......