Commit bb9b5be6 authored by Mateusz Bencer's avatar Mateusz Bencer Committed by Sang Ik Lee

[SPEC] Implement StridedSlice:v1 (#3722)

* DynSlice was adjusted to specification

* Fixed documentation

* Fixed styles

* Fixed clang warning

* Introduced StridedSlice as separate op

* revert dyn_slice

* Fixed StridedSlice implementation

* Enable downgrade pass in dynamic backend

* Implemented upgrade/downgrade pass

* Added serialization

* Styles applied

* Styles applied. Part.2

* Updated dynamic backends list

* Move StridedSlice outside experimental

* Make Slice as v0 of StridedSlice

* Styles applied

* Styles applied

* Fixed clang error

* Code review remarks introduced

* Move GenerateMask up in Opset0Downgrade to keep alphabetical sort

* Style apply
parent 5a61c135
......@@ -198,6 +198,7 @@ set (SRC
op/experimental/layers/roi_pooling.cpp
op/experimental/random_uniform.hpp
op/experimental/random_uniform.cpp
op/strided_slice.hpp
op/floor.cpp
op/floor.hpp
op/gather.cpp
......@@ -298,6 +299,8 @@ set (SRC
op/slice.hpp
op/softmax.cpp
op/softmax.hpp
op/strided_slice.cpp
op/strided_slice.hpp
op/sqrt.cpp
op/sqrt.hpp
op/stop_gradient.cpp
......
......@@ -202,6 +202,7 @@ namespace ngraph
#include "ngraph/op/softmax.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/stop_gradient.hpp"
#include "ngraph/op/strided_slice.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/tan.hpp"
......
......@@ -24,54 +24,61 @@ namespace ngraph
{
namespace op
{
/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
/// bounding box, optionally with stride.
class Slice : public Op
namespace v0
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"Slice", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a tensor slice operation
Slice() = default;
/// \brief Constructs a tensor slice operation.
///
/// \param arg The tensor to be sliced.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// \param strides The slicing strides; for example, strides of `{n,m}` means to take
/// every nth row and every mth column of the input matrix.
Slice(const Output<Node>& arg,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Strides& strides);
/// \brief Constructs a tensor slice operation with unit strides; i.e., every element
/// inside the bounding box will be copied to the output slice.
///
/// \param arg The tensor to be sliced.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
Slice(const Output<Node>& arg,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds);
/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
/// bounding box, optionally with stride.
class Slice : public Op
{
public:
NGRAPH_API
static constexpr NodeTypeInfo type_info{"Slice", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a tensor slice operation
Slice() = default;
/// \brief Constructs a tensor slice operation.
///
/// \param arg The tensor to be sliced.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// \param strides The slicing strides; for example, strides of `{n,m}` means to
/// take
/// every nth row and every mth column of the input matrix.
Slice(const Output<Node>& arg,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Strides& strides);
/// \brief Constructs a tensor slice operation with unit strides; i.e., every
/// element
/// inside the bounding box will be copied to the output slice.
///
/// \param arg The tensor to be sliced.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
Slice(const Output<Node>& arg,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
void validate_and_infer_types() override;
/// \return The inclusive lower-bound coordinates.
const Coordinate& get_lower_bounds() const { return m_lower_bounds; }
/// \return The exclusive upper-bound coordinates.
const Coordinate& get_upper_bounds() const { return m_upper_bounds; }
/// \return The slicing strides.
const Strides& get_strides() const { return m_strides; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
/// \return The inclusive lower-bound coordinates.
const Coordinate& get_lower_bounds() const { return m_lower_bounds; }
/// \return The exclusive upper-bound coordinates.
const Coordinate& get_upper_bounds() const { return m_upper_bounds; }
/// \return The slicing strides.
const Strides& get_strides() const { return m_strides; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
Coordinate m_lower_bounds;
Coordinate m_upper_bounds;
Strides m_strides;
};
Coordinate m_lower_bounds;
Coordinate m_upper_bounds;
Strides m_strides;
};
}
// default opset version
using v0::Slice;
}
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/strided_slice.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/validation_util.hpp"
#include <algorithm>
using namespace std;
using namespace ngraph;
constexpr NodeTypeInfo op::v1::StridedSlice::type_info;
// Constructs a StridedSlice from explicit begin/end/strides inputs.
//
// Inputs: data (0), begin (1), end (2), strides (3); begin and end are expected
// to be 1D i64 tensors. The five mask vectors are stored as attributes and are
// validated in validate_and_infer_types() (each element must be 0 or 1).
op::v1::StridedSlice::StridedSlice(const Output<Node>& data,
                                   const Output<Node>& begin,
                                   const Output<Node>& end,
                                   const Output<Node>& strides,
                                   const std::vector<int64_t>& begin_mask,
                                   const std::vector<int64_t>& end_mask,
                                   const std::vector<int64_t>& new_axis_mask,
                                   const std::vector<int64_t>& shrink_axis_mask,
                                   const std::vector<int64_t>& ellipsis_mask)
    : Op({data, begin, end, strides})
    , m_begin_mask{begin_mask}
    , m_end_mask{end_mask}
    , m_new_axis_mask{new_axis_mask}
    , m_shrink_axis_mask{shrink_axis_mask}
    , m_ellipsis_mask{ellipsis_mask}
{
    constructor_validate_and_infer_types();
}
// Delegating constructor for the no-strides form: synthesizes a constant i64
// strides input of all 1s (unit stride on every axis).
//
// NOTE(review): the synthesized strides length is taken from begin_mask.size(),
// not from the begin/end inputs — this assumes begin_mask always carries one
// entry per sliced axis (same length as begin/end); confirm callers never pass
// a shorter mask.
op::v1::StridedSlice::StridedSlice(const Output<Node>& data,
                                   const Output<Node>& begin,
                                   const Output<Node>& end,
                                   const std::vector<int64_t>& begin_mask,
                                   const std::vector<int64_t>& end_mask,
                                   const std::vector<int64_t>& new_axis_mask,
                                   const std::vector<int64_t>& shrink_axis_mask,
                                   const std::vector<int64_t>& ellipsis_mask)
    : StridedSlice(data,
                   begin,
                   end,
                   op::Constant::create(element::i64,
                                        Shape{begin_mask.size()},
                                        vector<int64_t>(begin_mask.size(), 1)),
                   begin_mask,
                   end_mask,
                   new_axis_mask,
                   shrink_axis_mask,
                   ellipsis_mask)
{
}
// Validates StridedSlice inputs/attributes and infers the output type/shape.
//
// Checks performed:
//  - begin (input 1) and end (input 2) must have element type i64
//    (NOTE: the messages below say "mask" but refer to these index inputs;
//    existing tests match the exact strings, so they are kept);
//  - every element of the five mask attributes must be 0 or 1;
//  - all non-empty masks must share begin_mask's length (empty == optional);
//  - if the data rank is static it must equal the mask length;
//  - begin and end inputs must be 1D when their ranks are static.
// When begin, end and strides are all Constants, the output shape is computed
// via infer_slice_shape; otherwise it is dynamic with the data's rank.
void op::v1::StridedSlice::validate_and_infer_types()
{
    const auto& begin_mask_et = get_input_element_type(1);
    const auto& end_mask_et = get_input_element_type(2);
    NODE_VALIDATION_CHECK(this,
                          begin_mask_et.compatible(element::Type_t::i64),
                          "Begin mask must have element type i64, but has ",
                          begin_mask_et);
    NODE_VALIDATION_CHECK(this,
                          end_mask_et.compatible(element::Type_t::i64),
                          "End mask must have element type i64, but has ",
                          end_mask_et);

    // Mask elements are int64_t; take the lambda parameter as int64_t so a
    // (theoretical) negative element is not implicitly converted to size_t.
    auto are_mask_elem_in_range = [](int64_t e) { return e == 0 || e == 1; };
    NODE_VALIDATION_CHECK(
        this,
        std::all_of(m_begin_mask.begin(), m_begin_mask.end(), are_mask_elem_in_range) &&
            std::all_of(m_end_mask.begin(), m_end_mask.end(), are_mask_elem_in_range) &&
            std::all_of(m_new_axis_mask.begin(), m_new_axis_mask.end(), are_mask_elem_in_range) &&
            std::all_of(
                m_shrink_axis_mask.begin(), m_shrink_axis_mask.end(), are_mask_elem_in_range) &&
            std::all_of(m_ellipsis_mask.begin(), m_ellipsis_mask.end(), are_mask_elem_in_range),
        "All masks of StridedSlice must be either 0 or 1");

    // Empty masks are allowed (optional attributes); every non-empty mask must
    // match the length of the first one (begin_mask).
    const vector<size_t> attr_sizes = {m_begin_mask.size(),
                                       m_end_mask.size(),
                                       m_new_axis_mask.size(),
                                       m_shrink_axis_mask.size(),
                                       m_ellipsis_mask.size()};
    const auto are_attr_sizes_eq =
        std::all_of(attr_sizes.begin(), attr_sizes.end(), [&attr_sizes](size_t s) {
            return (s == 0) || (attr_sizes[0] == s);
        });
    NODE_VALIDATION_CHECK(
        this, are_attr_sizes_eq, "All masks of StridedSlice must have the same size");

    const auto mask_size = m_begin_mask.size();
    const auto& data_rank = get_input_partial_shape(0).rank();
    if (data_rank.is_static())
    {
        NODE_VALIDATION_CHECK(
            this, static_cast<size_t>(data_rank) == mask_size, "Data rank must be equal mask size");
    }
    const auto& begin_shape = get_input_partial_shape(1);
    if (begin_shape.rank().is_static())
    {
        NODE_VALIDATION_CHECK(this,
                              static_cast<size_t>(begin_shape.rank()) == 1,
                              "Begin input must be 1D (begin rank: ",
                              begin_shape.rank(),
                              ").");
    }
    const auto& end_shape = get_input_partial_shape(2);
    if (end_shape.rank().is_static())
    {
        NODE_VALIDATION_CHECK(this,
                              static_cast<size_t>(end_shape.rank()) == 1,
                              "End input must be 1D (end rank: ",
                              end_shape.rank(),
                              ").");
    }

    // Output shape depends on the values of the begin/end/strides inputs.
    set_input_is_relevant_to_shape(1);
    set_input_is_relevant_to_shape(2);
    set_input_is_relevant_to_shape(3);

    auto begin_const = as_type_ptr<op::Constant>(input_value(1).get_node_shared_ptr());
    auto end_const = as_type_ptr<op::Constant>(input_value(2).get_node_shared_ptr());
    auto strides = as_type_ptr<op::Constant>(input_value(3).get_node_shared_ptr());
    if (begin_const && end_const && strides)
    {
        // All slice parameters are static: compute the exact output shape.
        set_output_type(0,
                        get_input_element_type(0),
                        infer_slice_shape(this,
                                          get_input_partial_shape(0),
                                          begin_const->get_vector<int64_t>(),
                                          end_const->get_vector<int64_t>(),
                                          strides->get_vector<int64_t>(),
                                          convert_mask_to_axis_set(get_begin_mask()),
                                          convert_mask_to_axis_set(get_end_mask()),
                                          convert_mask_to_axis_set(get_new_axis_mask()),
                                          convert_mask_to_axis_set(get_shrink_axis_mask()),
                                          convert_mask_to_axis_set(get_ellipsis_mask())));
    }
    else
    {
        set_output_type(0, get_input_element_type(0), PartialShape::dynamic(data_rank));
    }
}
// Converts a 0/1 bit-mask attribute into the set of axis indices whose mask
// bit is 1.
//
// \param mask  Bit-vector; element i selects axis i when equal to 1.
// \return      AxisSet containing every index i for which mask[i] == 1.
AxisSet op::v1::StridedSlice::convert_mask_to_axis_set(const std::vector<int64_t>& mask) const
{
    AxisSet axis_set{};
    // mask.size() is already size_t; the previous static_cast was redundant.
    for (size_t i = 0; i < mask.size(); ++i)
    {
        if (mask[i] == 1)
        {
            axis_set.emplace(i);
        }
    }
    return axis_set;
}
// Creates a replica of this op wired to new_args, carrying over every mask
// attribute unchanged.
shared_ptr<Node> op::v1::StridedSlice::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    const auto& data = new_args.at(0);
    const auto& begin = new_args.at(1);
    const auto& end = new_args.at(2);
    const auto& strides = new_args.at(3);
    return make_shared<v1::StridedSlice>(data,
                                         begin,
                                         end,
                                         strides,
                                         m_begin_mask,
                                         m_end_mask,
                                         m_new_axis_mask,
                                         m_shrink_axis_mask,
                                         m_ellipsis_mask);
}
// Autodiff is not implemented for StridedSlice; always throws.
void op::v1::StridedSlice::generate_adjoints(autodiff::Adjoints& /* adjoints */,
                                             const NodeVector& /* deltas */)
{
    throw ngraph_error("generate_adjoints not implemented for StridedSlice");
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include <vector>
#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/attr_types.hpp"
namespace ngraph
{
namespace op
{
namespace v1
{
            /// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
            ///        bounding box, optionally with stride.
            class StridedSlice : public Op
            {
            public:
                NGRAPH_API
                // Registered as version 1 of "Slice"; op::v0::Slice is version 0 of
                // the same name (see the Opset0Downgrade/Opset1Upgrade passes).
                static constexpr NodeTypeInfo type_info{"Slice", 1};
                const NodeTypeInfo& get_type_info() const override { return type_info; }
                /// \brief Constructs an uninitialized strided slice operation.
                StridedSlice() = default;
                /// \brief Constructs a dynamic tensor strided slice operation.
                ///
                /// \param data             The tensor to be sliced.
                /// \param begin            1D tensor with begin indexes for input blob slicing.
                /// \param end              1D tensor with end indexes for input blob slicing.
                /// \param strides          The slicing strides; for example, strides of `{n,m}`
                ///                         means to take every nth row and every mth column
                ///                         of the input matrix.
                /// \param begin_mask       When begin_mask[i] is 1, the corresponding
                ///                         dimension of the begin input is ignored.
                /// \param end_mask         When end_mask[i] is 1, the corresponding dimension of
                ///                         the end input is ignored.
                /// \param new_axis_mask    If new_axis_mask[i] is 1, a length 1 dimension
                ///                         is inserted on the i-th position.
                /// \param shrink_axis_mask If shrink_axis_mask[i] is 1, the dimension
                ///                         on the i-th position is deleted.
                /// \param ellipsis_mask    It inserts missing dimensions
                ///                         on a position of a non-zero bit.
                StridedSlice(const Output<Node>& data,
                             const Output<Node>& begin,
                             const Output<Node>& end,
                             const Output<Node>& strides,
                             const std::vector<int64_t>& begin_mask,
                             const std::vector<int64_t>& end_mask,
                             const std::vector<int64_t>& new_axis_mask = std::vector<int64_t>{},
                             const std::vector<int64_t>& shrink_axis_mask = std::vector<int64_t>{},
                             const std::vector<int64_t>& ellipsis_mask = std::vector<int64_t>{});
                /// \brief Constructs a dynamic tensor strided slice operation with unit strides
                ///        on every axis.
                ///
                /// \param data             The tensor to be sliced.
                /// \param begin            1D tensor with begin indexes for input blob slicing.
                /// \param end              1D tensor with end indexes for input blob slicing.
                /// \param begin_mask       When begin_mask[i] is 1, the corresponding
                ///                         dimension of the begin input is ignored.
                /// \param end_mask         When end_mask[i] is 1, the corresponding dimension of
                ///                         the end input is ignored.
                /// \param new_axis_mask    If new_axis_mask[i] is 1, a length 1 dimension
                ///                         is inserted on the i-th position.
                /// \param shrink_axis_mask If shrink_axis_mask[i] is 1, the dimension
                ///                         on the i-th position is deleted.
                /// \param ellipsis_mask    It inserts missing dimensions
                ///                         on a position of a non-zero bit.
                StridedSlice(const Output<Node>& data,
                             const Output<Node>& begin,
                             const Output<Node>& end,
                             const std::vector<int64_t>& begin_mask,
                             const std::vector<int64_t>& end_mask,
                             const std::vector<int64_t>& new_axis_mask = std::vector<int64_t>{},
                             const std::vector<int64_t>& shrink_axis_mask = std::vector<int64_t>{},
                             const std::vector<int64_t>& ellipsis_mask = std::vector<int64_t>{});
                const std::vector<int64_t>& get_begin_mask() const { return m_begin_mask; }
                const std::vector<int64_t>& get_end_mask() const { return m_end_mask; }
                const std::vector<int64_t>& get_new_axis_mask() const { return m_new_axis_mask; }
                const std::vector<int64_t>& get_shrink_axis_mask() const
                {
                    return m_shrink_axis_mask;
                }
                const std::vector<int64_t>& get_ellipsis_mask() const { return m_ellipsis_mask; }
                std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
                void validate_and_infer_types() override;
                size_t get_version() const override { return 1; }
            protected:
                // Autodiff is not implemented; always throws.
                void generate_adjoints(autodiff::Adjoints& adjoints,
                                       const NodeVector& deltas) override;

            private:
                // Converts a 0/1 mask vector to the set of axes whose bit is 1.
                AxisSet convert_mask_to_axis_set(const std::vector<int64_t>& mask) const;

                std::vector<int64_t> m_begin_mask;
                std::vector<int64_t> m_end_mask;
                std::vector<int64_t> m_new_axis_mask;
                std::vector<int64_t> m_shrink_axis_mask;
                std::vector<int64_t> m_ellipsis_mask;
            };
}
}
}
......@@ -31,8 +31,13 @@
#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/reverse.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/op/strided_slice.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/slice_plan.hpp"
#include <algorithm>
using namespace std;
using namespace ngraph;
......@@ -231,6 +236,25 @@ bool pass::Opset0Downgrade::run_on_node(shared_ptr<Node> node)
modified = true;
break;
}
case OP_TYPEID::GenerateMask:
{
auto tmp = dynamic_cast<const op::v1::GenerateMask*>(node.get());
NGRAPH_CHECK(node->input_value(1).get_node_shared_ptr()->is_constant());
auto mask_shape =
static_pointer_cast<op::Constant>(node->input_value(1).get_node_shared_ptr())
->get_shape_val();
auto seed = tmp->get_seed();
auto use_seed = tmp->get_use_seed();
auto probability = tmp->get_probability();
auto et = tmp->get_element_type();
auto replacement_node = make_shared<op::v0::GenerateMask>(
node->input(0).get_source_output(), mask_shape, et, seed, probability, use_seed);
replace_node(node, replacement_node);
modified = true;
break;
}
case OP_TYPEID::MaxPool:
{
auto tmp = as_type_ptr<op::v1::MaxPool>(node);
......@@ -369,6 +393,73 @@ bool pass::Opset0Downgrade::run_on_node(shared_ptr<Node> node)
modified = true;
break;
}
case OP_TYPEID::Slice:
{
auto convert_mask_to_axes = [](const std::vector<int64_t>& mask) {
AxisSet axes{};
for (auto i = 0; i < mask.size(); ++i)
{
if (mask[i] == 1)
{
axes.emplace(i);
}
}
return axes;
};
const auto input_data = node->input_value(0);
const auto input_data_pshape = input_data.get_partial_shape();
NGRAPH_CHECK(input_data_pshape.is_static(),
"Unable to convert StridedSlice:v1 to Slice:v0 "
"if input rank is not static. Node: ",
*node);
const auto begin_const =
as_type_ptr<op::Constant>(node->input_value(1).get_node_shared_ptr());
const auto end_const =
as_type_ptr<op::Constant>(node->input_value(2).get_node_shared_ptr());
const auto strides = as_type_ptr<op::Constant>(node->input_value(3).get_node_shared_ptr());
NGRAPH_CHECK(begin_const && end_const && strides,
"Unable to convert StridedSlice:v1 to Slice:v0 "
"if begin, end or strides are not constant. Node: ",
*node);
const auto tmp = as_type_ptr<op::v1::StridedSlice>(node);
SlicePlan p = make_slice_plan(input_data_pshape.to_shape(),
begin_const->get_vector<int64_t>(),
end_const->get_vector<int64_t>(),
strides->get_vector<int64_t>(),
convert_mask_to_axes(tmp->get_begin_mask()),
convert_mask_to_axes(tmp->get_end_mask()),
convert_mask_to_axes(tmp->get_new_axis_mask()),
convert_mask_to_axes(tmp->get_shrink_axis_mask()),
convert_mask_to_axes(tmp->get_ellipsis_mask()));
shared_ptr<Node> replacement_node =
make_shared<op::v0::Slice>(input_data,
Coordinate(p.begins.begin(), p.begins.end()),
Coordinate(p.ends.begin(), p.ends.end()),
Strides(p.strides.begin(), p.strides.end()));
if (p.reshape_in_shape != p.reshape_out_shape)
{
replacement_node =
make_shared<op::Reshape>(replacement_node,
ngraph::get_default_order(p.reshape_in_shape),
p.reshape_out_shape);
}
if (!p.reverse_axes.empty())
{
replacement_node = make_shared<op::Reverse>(replacement_node, p.reverse_axes);
}
replace_node(node, replacement_node);
break;
}
case OP_TYPEID::Sum:
{
auto tmp = as_type_ptr<op::v1::ReduceSum>(node);
......@@ -403,27 +494,6 @@ bool pass::Opset0Downgrade::run_on_node(shared_ptr<Node> node)
modified = true;
break;
}
case OP_TYPEID::GenerateMask:
{
auto tmp = dynamic_cast<const op::v1::GenerateMask*>(node.get());
NGRAPH_CHECK(node->input_value(1).get_node_shared_ptr()->is_constant());
auto mask_shape =
static_pointer_cast<op::Constant>(node->input_value(1).get_node_shared_ptr())
->get_shape_val();
auto seed = tmp->get_seed();
auto use_seed = tmp->get_use_seed();
auto probability = tmp->get_probability();
auto et = tmp->get_element_type();
auto replacement_node = make_shared<op::v0::GenerateMask>(
node->input(0).get_source_output(), mask_shape, et, seed, probability, use_seed);
replace_node(node, replacement_node);
modified = true;
break;
}
default: break;
}
#if defined(__clang__)
......
......@@ -29,7 +29,9 @@
#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/reverse.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/op/softmax.hpp"
#include "ngraph/op/strided_slice.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/topk.hpp"
......@@ -413,6 +415,30 @@ bool pass::Opset1Upgrade::run_on_node(shared_ptr<Node> node)
modified = true;
break;
}
case OP_TYPEID::Slice:
{
const auto tmp = as_type_ptr<op::v0::Slice>(node);
const auto data = node->input(0).get_source_output();
const auto begin = op::Constant::create(
element::i64, Shape{tmp->get_lower_bounds().size()}, tmp->get_lower_bounds());
const auto end = op::Constant::create(
element::i64, Shape{tmp->get_upper_bounds().size()}, tmp->get_upper_bounds());
const auto strides = op::Constant::create(
element::i64, Shape{tmp->get_strides().size()}, tmp->get_strides());
int64_t input_size = tmp->get_lower_bounds().size();
auto replacement_node = make_shared<op::v1::StridedSlice>(data,
begin,
end,
strides,
vector<int64_t>(input_size, 0),
vector<int64_t>(input_size, 0));
replace_node(node, replacement_node);
modified = true;
break;
}
case OP_TYPEID::Sum:
{
bool keep_dims = false;
......
......@@ -32,6 +32,7 @@
#include "ngraph/op/pad.hpp"
#include "ngraph/op/product.hpp"
#include "ngraph/op/reverse.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/topk.hpp"
#include "ngraph/runtime/cpu/cpu_external_function.hpp"
......@@ -86,7 +87,6 @@ namespace ngraph
class Constant;
class Reshape;
class Sign;
class Slice;
class Exp;
class EmbeddingLookup;
class Sin;
......
......@@ -146,6 +146,7 @@
#include "ngraph/op/softmax.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/stop_gradient.hpp"
#include "ngraph/op/strided_slice.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/tan.hpp"
......@@ -2143,10 +2144,30 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
}
case OP_TYPEID::Slice:
{
auto lower_bounds = node_js.at("lower_bounds").get<vector<size_t>>();
auto upper_bounds = node_js.at("upper_bounds").get<vector<size_t>>();
auto strides = node_js.at("strides").get<vector<size_t>>();
node = make_shared<op::Slice>(args[0], lower_bounds, upper_bounds, strides);
if (op_version == 0)
{
auto lower_bounds = node_js.at("lower_bounds").get<vector<size_t>>();
auto upper_bounds = node_js.at("upper_bounds").get<vector<size_t>>();
auto strides = node_js.at("strides").get<vector<size_t>>();
node = make_shared<op::Slice>(args[0], lower_bounds, upper_bounds, strides);
}
if (op_version == 1)
{
auto begin_mask = node_js.at("begin_mask").get<vector<int64_t>>();
auto end_mask = node_js.at("end_mask").get<vector<int64_t>>();
auto new_axis_mask = node_js.at("new_axis_mask").get<vector<int64_t>>();
auto shrink_axis_mask = node_js.at("shrink_axis_mask").get<vector<int64_t>>();
auto ellipsis_mask = node_js.at("ellipsis_mask").get<vector<int64_t>>();
node = make_shared<op::v1::StridedSlice>(args[0],
args[1],
args[2],
args[3],
begin_mask,
end_mask,
new_axis_mask,
shrink_axis_mask,
ellipsis_mask);
}
break;
}
case OP_TYPEID::Softmax:
......@@ -3434,10 +3455,22 @@ json JSONSerializer::serialize_node(const Node& n)
}
case OP_TYPEID::Slice:
{
auto tmp = static_cast<const op::Slice*>(&n);
node["lower_bounds"] = tmp->get_lower_bounds();
node["upper_bounds"] = tmp->get_upper_bounds();
node["strides"] = tmp->get_strides();
if (op_version == 0)
{
auto tmp = static_cast<const op::Slice*>(&n);
node["lower_bounds"] = tmp->get_lower_bounds();
node["upper_bounds"] = tmp->get_upper_bounds();
node["strides"] = tmp->get_strides();
}
if (op_version == 1)
{
auto tmp = static_cast<const op::v1::StridedSlice*>(&n);
node["begin_mask"] = tmp->get_begin_mask();
node["end_mask"] = tmp->get_end_mask();
node["new_axis_mask"] = tmp->get_new_axis_mask();
node["shrink_axis_mask"] = tmp->get_shrink_axis_mask();
node["ellipsis_mask"] = tmp->get_ellipsis_mask();
}
break;
}
case OP_TYPEID::SpaceToDepth:
......
......@@ -77,6 +77,7 @@ set(SRC
opset_pass/poolings_opset_pass.cpp
opset_pass/product_opset_pass.cpp
opset_pass/reverse_opset_pass.cpp
opset_pass/slice_opset_pass.cpp
opset_pass/softmax_opset_pass.cpp
opset_pass/sum_opset_pass.cpp
opset_pass/topk_opset_pass.cpp
......@@ -115,6 +116,7 @@ set(SRC
type_prop/dyn_replace_slice.cpp
type_prop/dyn_reshape.cpp
type_prop/dyn_slice.cpp
type_prop/strided_slice.cpp
type_prop/elu.cpp
type_prop/embedding_lookup.cpp
type_prop/fake_quantize.cpp
......@@ -280,6 +282,7 @@ set(MULTI_TEST_SRC
backend/dyn_replace_slice_reference.in.cpp
backend/dyn_reshape.in.cpp
backend/dyn_slice_reference.in.cpp
backend/strided_slice.in.cpp
backend/dynamic.in.cpp
backend/embedding_lookup.in.cpp
backend/erf.in.cpp
......
This diff is collapsed.
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/opset0_downgrade.hpp"
#include "ngraph/pass/opset1_upgrade.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
// Upgrade pass: a v0::Slice with static bounds/strides is replaced by a
// v1::StridedSlice whose begin/end/strides become i64 Constant inputs and whose
// begin/end masks are all-zero (no index is ignored).
// NOTE(review): the test name says "dyn_slice" but it exercises v0::Slice.
TEST(opset_transform, opset1_dyn_slice_upgrade_pass)
{
    auto arg = make_shared<op::Parameter>(element::f32, Shape{7, 4, 6, 8});
    Coordinate lower_bounds{2, 1, 4, 0};
    Coordinate upper_bounds{4, 3, 5, 1};
    Strides strides{1, 2, 1, 2};

    auto slice_v0 = make_shared<op::v0::Slice>(arg, lower_bounds, upper_bounds, strides);
    const auto result = make_shared<op::Result>(slice_v0);
    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});

    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset1Upgrade>();
    pass_manager.run_passes(f);

    const auto pass_replacement_node =
        f->get_result()->input(0).get_source_output().get_node_shared_ptr();
    const auto strided_slice_v1 = as_type_ptr<op::v1::StridedSlice>(pass_replacement_node);
    // The bounds/strides of the v0 op must reappear as constant inputs 1..3.
    auto begin_const =
        as_type_ptr<op::Constant>(strided_slice_v1->input_value(1).get_node_shared_ptr());
    auto end_const =
        as_type_ptr<op::Constant>(strided_slice_v1->input_value(2).get_node_shared_ptr());
    auto strides_const =
        as_type_ptr<op::Constant>(strided_slice_v1->input_value(3).get_node_shared_ptr());

    EXPECT_EQ(strided_slice_v1->description(), "Slice");
    EXPECT_EQ(strided_slice_v1->get_version(), 1);
    EXPECT_EQ(strided_slice_v1->get_begin_mask(), vector<int64_t>(4, 0));
    EXPECT_EQ(strided_slice_v1->get_end_mask(), vector<int64_t>(4, 0));
    EXPECT_EQ(begin_const->get_vector<int64_t>(),
              vector<int64_t>(lower_bounds.begin(), lower_bounds.end()));
    EXPECT_EQ(end_const->get_vector<int64_t>(),
              vector<int64_t>(upper_bounds.begin(), upper_bounds.end()));
    EXPECT_EQ(strides_const->get_vector<int64_t>(),
              vector<int64_t>(strides.begin(), strides.end()));
}
// Downgrade pass: a v1::StridedSlice with constant begin/end collapses to a
// v0::Slice. begin_mask[2] == 1 drops the begin index on axis 2 (lower bound
// becomes 0) and end_mask[0] == 1 drops the end index on axis 0 (upper bound
// becomes the dimension size, 5).
TEST(opset_transform, opset1_strided_slice_downgrade_pass)
{
    auto data = make_shared<op::Parameter>(element::f32, Shape{5, 7, 6, 8});
    auto begin = op::Constant::create(element::i64, Shape{4}, {1, 2, 1, 2});
    auto end = op::Constant::create(element::i64, Shape{4}, {3, 4, 5, 6});

    auto strided_slice_v1 = make_shared<op::v1::StridedSlice>(
        data, begin, end, vector<int64_t>{0, 0, 1, 0}, vector<int64_t>{1, 0, 0, 0});
    const auto result = make_shared<op::Result>(strided_slice_v1);
    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data});

    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset0Downgrade>();
    pass_manager.run_passes(f);

    const auto pass_replacement_node =
        f->get_result()->input(0).get_source_output().get_node_shared_ptr();
    const auto slice_v0 = as_type_ptr<op::v0::Slice>(pass_replacement_node);

    EXPECT_EQ(slice_v0->description(), "Slice");
    EXPECT_EQ(slice_v0->get_version(), 0);
    EXPECT_EQ(slice_v0->get_lower_bounds(), Coordinate({1, 2, 0, 2}));
    EXPECT_EQ(slice_v0->get_upper_bounds(), Coordinate({5, 4, 5, 6}));
    EXPECT_EQ(slice_v0->get_strides(), Strides({1, 1, 1, 1}));
}
// Downgrade pass must fail with a descriptive error when the data input has a
// dynamic shape: v0::Slice requires a fully static input shape to build its
// slice plan.
TEST(opset_transform, opset1_strided_slice_downgrade_pass_dynamic_input_shape)
{
    auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto begin = op::Constant::create(element::i64, Shape{4}, {1, 2, 1, 2});
    auto end = op::Constant::create(element::i64, Shape{4}, {3, 4, 5, 6});

    auto strided_slice_v1 = make_shared<op::v1::StridedSlice>(
        data, begin, end, vector<int64_t>{0, 0, 1, 0}, vector<int64_t>{1, 0, 0, 0});
    const auto result = make_shared<op::Result>(strided_slice_v1);
    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data});

    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset0Downgrade>();

    try
    {
        pass_manager.run_passes(f);
        FAIL() << "Exception after Opset0Downgrade pass was not thrown.";
    }
    catch (const ngraph_error& error)
    {
        EXPECT_HAS_SUBSTRING(
            error.what(),
            std::string(
                "Unable to convert StridedSlice:v1 to Slice:v0 if input rank is not static."));
    }
    catch (...)
    {
        FAIL() << "StridedSlice pass failed for unexpected reason";
    }
}
// Downgrade pass must fail with a descriptive error when the end input is not
// a Constant: v0::Slice needs static begin/end/strides values.
TEST(opset_transform, opset1_strided_slice_downgrade_pass_end_not_constant)
{
    auto data = make_shared<op::Parameter>(element::f32, Shape{5, 7, 6, 8});
    auto begin = op::Constant::create(element::i64, Shape{4}, {1, 2, 1, 2});
    // end is a Parameter, not a Constant — this is what triggers the error.
    auto end = make_shared<op::Parameter>(element::i64, Shape{4});

    auto strided_slice_v1 = make_shared<op::v1::StridedSlice>(
        data, begin, end, vector<int64_t>{0, 0, 1, 0}, vector<int64_t>{1, 0, 0, 0});
    const auto result = make_shared<op::Result>(strided_slice_v1);
    auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data, end});

    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<pass::Opset0Downgrade>();

    try
    {
        pass_manager.run_passes(f);
        FAIL() << "Exception after Opset0Downgrade pass was not thrown.";
    }
    catch (const ngraph_error& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             std::string("Unable to convert StridedSlice:v1 to Slice:v0 "
                                         "if begin, end or strides are not constant"));
    }
    catch (...)
    {
        FAIL() << "StridedSlice pass failed for unexpected reason";
    }
}
......@@ -440,3 +440,45 @@ TEST(serialize, opset1_pad)
EXPECT_EQ(g_pad->get_version(), 1);
EXPECT_EQ(dynamic_cast<const op::v1::Pad*>(g_pad.get())->get_pad_mode(), pad_mode);
}
// Serialization round trip: a v1::StridedSlice written to JSON and read back
// must preserve all five mask attributes and deserialize as a v1 op.
TEST(serialize, opset1_strided_slice)
{
    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
    auto begin = make_shared<op::Parameter>(element::i64, Shape{4});
    auto end = make_shared<op::Parameter>(element::i64, Shape{4});
    auto strides = make_shared<op::Parameter>(element::i64, Shape{4});

    const std::vector<int64_t> begin_mask{1, 0, 1, 0};
    const std::vector<int64_t> end_mask{1, 1, 1, 0};
    const std::vector<int64_t> new_axis_mask{0, 0, 1, 1};
    const std::vector<int64_t> shrink_axis_mask{0, 0, 0, 0};
    const std::vector<int64_t> ellipsis_mask{1, 1, 1, 1};

    auto strided_slice_in = make_shared<op::v1::StridedSlice>(data,
                                                              begin,
                                                              end,
                                                              strides,
                                                              begin_mask,
                                                              end_mask,
                                                              new_axis_mask,
                                                              shrink_axis_mask,
                                                              ellipsis_mask);

    auto result = make_shared<op::Result>(strided_slice_in);
    auto f =
        make_shared<Function>(ResultVector{result}, ParameterVector{data, begin, end, strides});
    string s = serialize(f);

    shared_ptr<Function> g = deserialize(s);
    auto g_result = g->get_results().at(0);
    auto g_strided_slice_v1 = g_result->input(0).get_source_output().get_node_shared_ptr();
    auto strided_slice_out = as_type_ptr<op::v1::StridedSlice>(g_strided_slice_v1);

    EXPECT_EQ(strided_slice_out->description(), "Slice");
    EXPECT_EQ(strided_slice_out->get_version(), 1);
    EXPECT_EQ(strided_slice_out->get_begin_mask(), begin_mask);
    EXPECT_EQ(strided_slice_out->get_end_mask(), end_mask);
    EXPECT_EQ(strided_slice_out->get_new_axis_mask(), new_axis_mask);
    EXPECT_EQ(strided_slice_out->get_shrink_axis_mask(), shrink_axis_mask);
    EXPECT_EQ(strided_slice_out->get_ellipsis_mask(), ellipsis_mask);
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
#include <memory>
using namespace std;
using namespace ngraph;
// The begin input of v1::StridedSlice must have element type i64; an i32
// begin input is expected to be rejected during type propagation.
TEST(type_prop, strided_slice_begin_incorrect_type)
{
    const auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
    const auto begin_i32 = make_shared<op::Parameter>(element::i32, Shape{4});
    const auto end_i64 = make_shared<op::Parameter>(element::i64, Shape{4});
    const vector<int64_t> mask{1, 0, 1, 0};
    try
    {
        const auto node = make_shared<op::v1::StridedSlice>(arg, begin_i32, end_i64, mask, mask);
        // Construction must not succeed with an i32 begin input.
        FAIL() << "Incorrect begin type exception not thrown.";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), std::string("Begin mask must have element type i64"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}
// The end input of v1::StridedSlice must have element type i64; an i32 end
// input is expected to be rejected during type propagation.
TEST(type_prop, strided_slice_end_incorrect_type)
{
    const auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
    const auto begin_i64 = make_shared<op::Parameter>(element::i64, Shape{4});
    const auto end_i32 = make_shared<op::Parameter>(element::i32, Shape{4});
    const vector<int64_t> mask{1, 0, 1, 0};
    try
    {
        const auto node = make_shared<op::v1::StridedSlice>(arg, begin_i64, end_i32, mask, mask);
        // Construction must not succeed with an i32 end input.
        FAIL() << "Incorrect end type exception not thrown.";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), std::string("End mask must have element type i64"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}
// All mask attributes of v1::StridedSlice must have the same length; passing a
// new-axis mask of length 5 alongside length-4 begin/end masks must be rejected.
TEST(type_prop, strided_slice_incompatible_size_of_masks_attr)
{
    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
    auto begin = make_shared<op::Parameter>(element::i64, Shape{4});
    auto end = make_shared<op::Parameter>(element::i64, Shape{4});
    try
    {
        auto strided_slice = make_shared<op::v1::StridedSlice>(data,
                                                               begin,
                                                               end,
                                                               vector<int64_t>{1, 0, 1, 0},
                                                               vector<int64_t>{1, 0, 1, 0},
                                                               vector<int64_t>{1, 0, 1, 0, 1});
        // Should have thrown, so fail if it didn't
        // (message typo "od" -> "of" fixed)
        FAIL() << "Incompatible size of masks exception not thrown.";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(),
                             std::string("All masks of StridedSlice must have the same size"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}
// Any mask element other than 0 or 1 must be rejected by v1::StridedSlice.
TEST(type_prop, strided_slice_mask_incorrect_value)
{
    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
    // begin is a well-formed 1D input here: the only defect under test is the
    // illegal value `2` in the end mask. (The previous Shape{4, 5} was a
    // copy-paste from the begin-shape test and could let the shape check fire
    // instead of the mask-value check.)
    auto begin = make_shared<op::Parameter>(element::i64, Shape{4});
    auto end = make_shared<op::Parameter>(element::i64, Shape{4});
    try
    {
        auto strided_slice = make_shared<op::v1::StridedSlice>(
            data, begin, end, vector<int64_t>{1, 0, 1, 0}, vector<int64_t>{1, 0, 1, 2});
        // Should have thrown, so fail if it didn't
        FAIL() << "Incorrect values of StridedSlice mask exception not thrown.";
    }
    catch (const NodeValidationFailure& error)
    {
        // NOTE(review): substring matches the op's (grammatically odd)
        // validation message verbatim; keep in sync with the op implementation.
        EXPECT_HAS_SUBSTRING(error.what(),
                             std::string("All masks of StridedSlice must have be 0 or 1"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}
// The begin input of v1::StridedSlice must be a 1D tensor; a 2D begin input
// is expected to be rejected during type propagation.
TEST(type_prop, strided_slice_begin_incorrect_shape)
{
    const auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
    const auto begin_2d = make_shared<op::Parameter>(element::i64, Shape{4, 5});
    const auto end_1d = make_shared<op::Parameter>(element::i64, Shape{4});
    const vector<int64_t> mask{1, 0, 1, 0};
    try
    {
        const auto node = make_shared<op::v1::StridedSlice>(arg, begin_2d, end_1d, mask, mask);
        // Construction must not succeed with a rank-2 begin input.
        FAIL() << "Incorrect shape of begin exception not thrown.";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), std::string("Begin input must be 1D (begin rank:"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}
// The end input of v1::StridedSlice must be a 1D tensor; a 2D end input is
// expected to be rejected during type propagation.
TEST(type_prop, strided_slice_end_incorrect_shape)
{
    const auto arg = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
    const auto begin_1d = make_shared<op::Parameter>(element::i64, Shape{4});
    const auto end_2d = make_shared<op::Parameter>(element::i64, Shape{4, 5});
    const vector<int64_t> mask{1, 0, 1, 0};
    try
    {
        const auto node = make_shared<op::v1::StridedSlice>(arg, begin_1d, end_2d, mask, mask);
        // Construction must not succeed with a rank-2 end input.
        FAIL() << "Incorrect shape of end exception not thrown.";
    }
    catch (const NodeValidationFailure& error)
    {
        EXPECT_HAS_SUBSTRING(error.what(), std::string("End input must be 1D (end rank:"));
    }
    catch (...)
    {
        FAIL() << "Deduced type check failed for unexpected reason";
    }
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment