Unverified Commit 1cfa3d66 authored by Scott Cyphers, committed by GitHub

Merge branch 'master' into gwenger/deprecate_copy_from

parents 1f39edbe f6a404eb
......@@ -60,6 +60,8 @@ ExternalProject_Add(
${GTEST_CMAKE_ARGS}
BINARY_DIR "${EXTERNAL_PROJECTS_ROOT}/gtest/build"
EXCLUDE_FROM_ALL TRUE
BUILD_BYPRODUCTS ${CMAKE_BINARY_DIR}/ngraph/gtest/build/googlemock/gtest/libgtest.a
                 ${CMAKE_BINARY_DIR}/ngraph/gtest/build/googlemock/libgmock.a
)
#------------------------------------------------------------------------------
......
......@@ -310,7 +310,7 @@ def scale_shift(data, scale, shift, name=None): # type: (Node, Node, Node, str)
@nameable_op
def space_to_depth(data, block_size, name=None): # type: (Node, int, str) -> Node
def space_to_depth(data, mode, block_size, name=None): # type: (Node, str, int, str) -> Node
"""Perform SpaceToDepth operation on the input tensor.
SpaceToDepth rearranges blocks of spatial data into depth.
......@@ -318,11 +318,16 @@ def space_to_depth(data, block_size, name=None): # type: (Node, int, str) -> No
and width dimensions are moved to the depth dimension.
:param data: The node with data tensor.
:param mode: Specifies how the output depth dimension is gathered from block coordinates.
blocks_first: The output depth is gathered from [block_size, ..., block_size, C]
depth_first: The output depth is gathered from [C, block_size, ..., block_size]
:param block_size: The size of the block of values to be moved. Scalar value.
:param name: Optional output node name.
:return: The new node performing a SpaceToDepth operation on input tensor.
"""
return SpaceToDepth(data, block_size)
return SpaceToDepth(data, mode, block_size)
@nameable_op
......
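The updated Python wrapper can be driven exactly as in the unit test further below. A minimal usage sketch, assuming the ngraph Python package and a runtime set up the way the test suite does it (runtime construction itself is omitted here):

import numpy as np
import ngraph as ng

data_shape = [1, 2, 4, 4]
data = np.arange(32, dtype=np.float32).reshape(data_shape)
parameter = ng.parameter(data_shape, name='Data', dtype=np.float32)
# 'blocks_first' gathers the output depth from [block_size, ..., block_size, C];
# 'depth_first' gathers it from [C, block_size, ..., block_size].
model = ng.space_to_depth(parameter, 'blocks_first', 2)
# computation = runtime.computation(model, parameter)  # as in the test suite
# result = computation(data)                           # shape (1, 8, 2, 2)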
......@@ -27,5 +27,5 @@ void regclass_pyngraph_op_SpaceToDepth(py::module m)
py::class_<ngraph::op::SpaceToDepth, std::shared_ptr<ngraph::op::SpaceToDepth>, ngraph::op::Op>
spacetodepth(m, "SpaceToDepth");
spacetodepth.doc() = "ngraph.impl.op.SpaceToDepth wraps ngraph::op::SpaceToDepth";
spacetodepth.def(py::init<const std::shared_ptr<ngraph::Node>&, int&>());
spacetodepth.def(py::init<const std::shared_ptr<ngraph::Node>&, const std::string&, int&>());
}
......@@ -429,11 +429,12 @@ def test_space_to_depth_operator():
data_shape = [1, 2, 4, 4]
data_value = np.arange(start=0, stop=32, step=1.0, dtype=np.float32).reshape(data_shape)
mode = 'blocks_first'
block_size = 2
parameter_data = ng.parameter(data_shape, name='Data', dtype=np.float32)
model = ng.space_to_depth(parameter_data, block_size)
model = ng.space_to_depth(parameter_data, mode, block_size)
computation = runtime.computation(model, parameter_data)
result = computation(data_value)
......
......@@ -29,7 +29,9 @@ namespace ngraph
{
auto data = node.get_ng_inputs().at(0);
std::size_t block_size = node.get_attribute_value<std::int64_t>("blocksize");
return NodeVector{std::make_shared<ngraph::op::SpaceToDepth>(data, block_size)};
const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
return NodeVector{
std::make_shared<ngraph::op::SpaceToDepth>(data, mode, block_size)};
}
} // namespace set_1
......
......@@ -35,7 +35,10 @@ namespace ngraph
static constexpr NodeTypeInfo type_info{"Add", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an uninitialized addition operation
Add() = default;
Add()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
{
}
/// \brief Constructs an addition operation.
///
......@@ -71,7 +74,10 @@ namespace ngraph
static constexpr NodeTypeInfo type_info{"Add", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs an uninitialized addition operation
Add() = default;
Add()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY)
{
}
/// \brief Constructs an addition operation.
///
......
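A pattern repeated throughout this change: every v0 arithmetic op now default-constructs its base class with AutoBroadcastSpec::NONE (operand shapes must match exactly), while every v1 counterpart defaults to NUMPY-style auto-broadcasting. A NumPy sketch of the difference between the two policies (illustration only; nGraph itself performs this check during shape validation in C++):

import numpy as np

a = np.ones((2, 3), dtype=np.float32)
b = np.arange(3, dtype=np.float32)  # shape (3,)

# AutoBroadcastType::NUMPY: b is implicitly broadcast to (2, 3),
# so an elementwise add of these shapes validates and computes.
print(a + b)

# AutoBroadcastType::NONE: operand shapes must be identical, so the
# same pair of inputs would be rejected at validation time.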
......@@ -31,7 +31,10 @@ namespace ngraph
NGRAPH_API
static constexpr NodeTypeInfo type_info{"Atan2", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Atan2() = default;
Atan2()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
{
}
/// \brief atan2(y,x) is the angle from the origin to the point (x,y) (note reversed
/// order).
......
......@@ -32,7 +32,10 @@ namespace ngraph
static constexpr NodeTypeInfo type_info{"Divide", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a division operation.
Divide() = default;
Divide()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
{
}
/// \brief Constructs a division operation.
///
/// \param arg0 Node that produces the first input tensor.
......@@ -76,7 +79,11 @@ namespace ngraph
static constexpr NodeTypeInfo type_info{"Divide", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a division operation.
Divide() = default;
Divide()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY)
{
}
/// \brief Constructs a division operation.
///
/// \param arg0 Node that produces the first input tensor.
......
......@@ -66,12 +66,18 @@ shared_ptr<Node> op::Gelu::copy_with_new_args(const NodeVector& new_args) const
void op::Gelu::pre_validate_and_infer_types()
{
element::Type input_element_type = get_input_element_type(0);
PartialShape input_pshape = get_input_partial_shape(0);
NODE_VALIDATION_CHECK(this,
input_element_type.is_dynamic() || input_element_type.is_real(),
"Argument element type must be f16, bf16, f32, f64 or dynamic (got ",
input_element_type,
").");
if (input_pshape.is_dynamic())
{
set_output_type(0, input_element_type, input_pshape);
}
}
void op::Gelu::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
......@@ -94,12 +100,18 @@ op::GeluBackpropFactor::GeluBackpropFactor(const Output<Node>& x)
void op::GeluBackpropFactor::pre_validate_and_infer_types()
{
element::Type input_element_type = get_input_element_type(0);
PartialShape input_pshape = get_input_partial_shape(0);
NODE_VALIDATION_CHECK(this,
input_element_type.is_dynamic() || input_element_type.is_real(),
"Argument element type must be f16, bf16, f32, f64 or dynamic (got ",
input_element_type,
").");
if (input_pshape.is_dynamic())
{
set_output_type(0, input_element_type, input_pshape);
}
}
shared_ptr<Node> op::GeluBackpropFactor::copy_with_new_args(const NodeVector& new_args) const
......
......@@ -100,6 +100,10 @@ void op::GroupConvolution::pre_validate_and_infer_types()
get_groups()) == data_shape.to_shape()[1],
"Incorrect number of channels per filter");
}
else
{
set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
}
}
void op::GroupConvolution::post_validate_and_infer_types()
......
......@@ -170,7 +170,7 @@ shared_ptr<Node> op::LayerNorm::copy_with_new_args(const NodeVector& new_args) c
}
}
void op::LayerNorm::pre_validate_and_infer_types()
void op::LayerNorm::validate_and_infer_types()
{
element::Type input_element_type = get_input_element_type(0);
......@@ -509,7 +509,7 @@ shared_ptr<Node> op::LayerNormBackprop::copy_with_new_args(const NodeVector& new
}
}
void op::LayerNormBackprop::pre_validate_and_infer_types()
void op::LayerNormBackprop::validate_and_infer_types()
{
element::Type input_element_type = get_input_element_type(0);
......
......@@ -55,7 +55,7 @@ namespace ngraph
virtual NodeVector decompose_op() const override;
void pre_validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
......@@ -121,7 +121,7 @@ namespace ngraph
virtual NodeVector decompose_op() const override;
void pre_validate_and_infer_types() override;
void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
......
......@@ -25,13 +25,21 @@ using namespace ngraph;
constexpr NodeTypeInfo op::SpaceToDepth::type_info;
op::SpaceToDepth::SpaceToDepth(const Output<Node>& data, const size_t block_size)
op::SpaceToDepth::SpaceToDepth(const Output<Node>& data,
const SpaceToDepthMode& mode,
size_t block_size)
: FusedOp({data})
, m_blocksize(block_size)
, m_mode(mode)
{
constructor_validate_and_infer_types();
}
op::SpaceToDepth::SpaceToDepth(const Output<Node>& data, const std::string& mode, size_t block_size)
: SpaceToDepth(data, mode_from_string(mode), block_size)
{
}
NodeVector op::SpaceToDepth::decompose_op() const
{
auto data = input_value(0);
......@@ -74,7 +82,17 @@ NodeVector op::SpaceToDepth::decompose_op() const
// rearrange them so that appropriate chunks of data are close to their
// destination place. Finally squeeze data from respective dimensions.
Output<Node> flat_node = builder::reshape(data, Shape{n, c, h_flat, bs, w_flat, bs});
flat_node = builder::reorder_axes(flat_node, {0, 3, 5, 1, 2, 4});
switch (m_mode)
{
case SpaceToDepthMode::DEPTH_FIRST:
{
flat_node = builder::reorder_axes(flat_node, {0, 1, 3, 5, 2, 4});
break;
}
case SpaceToDepthMode::BLOCKS_FIRST:
default:
{
flat_node = builder::reorder_axes(flat_node, {0, 3, 5, 1, 2, 4});
break;
}
}
return NodeVector{builder::reshape(flat_node, Shape{n, c_high, h_flat, w_flat})};
}
......@@ -84,5 +102,17 @@ shared_ptr<Node> op::SpaceToDepth::copy_with_new_args(const NodeVector& new_args
{
throw ngraph_error("Incorrect number of new arguments");
}
return make_shared<SpaceToDepth>(new_args.at(0), m_blocksize);
return make_shared<SpaceToDepth>(new_args.at(0), m_mode, m_blocksize);
}
op::SpaceToDepth::SpaceToDepthMode op::SpaceToDepth::mode_from_string(const std::string& mode) const
{
static const std::map<std::string, SpaceToDepthMode> allowed_values = {
{"blocks_first", SpaceToDepthMode::BLOCKS_FIRST},
{"depth_first", SpaceToDepthMode::DEPTH_FIRST}};
NODE_VALIDATION_CHECK(
this, allowed_values.count(mode) > 0, "Invalid 'depth_to_space_mode' value passed in.");
return allowed_values.at(mode);
}
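The two branches of the switch in decompose_op differ only in where the block offsets land in the output channel index. A NumPy reference sketch of the same decomposition (an illustration of the axis orders above, not the op's actual kernel):

import numpy as np

def space_to_depth_ref(data, mode, bs):
    # NumPy mirror of op::SpaceToDepth::decompose_op:
    # reshape, reorder axes, collapse back.
    n, c, h, w = data.shape
    flat = data.reshape(n, c, h // bs, bs, w // bs, bs)
    if mode == 'depth_first':
        # depth gathered from [C, block_size, block_size]
        flat = flat.transpose(0, 1, 3, 5, 2, 4)
    else:  # 'blocks_first', also the default branch of the switch
        # depth gathered from [block_size, block_size, C]
        flat = flat.transpose(0, 3, 5, 1, 2, 4)
    return flat.reshape(n, c * bs * bs, h // bs, w // bs)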
......@@ -32,6 +32,14 @@ namespace ngraph
class SpaceToDepth : public ngraph::op::util::FusedOp
{
public:
enum class SpaceToDepthMode
{
// The output depth is gathered from [block_size, ..., block_size, C]
BLOCKS_FIRST,
// The output depth is gathered from [C, block_size, ..., block_size]
DEPTH_FIRST
};
NGRAPH_API
static constexpr NodeTypeInfo type_info{"SpaceToDepth", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
......@@ -39,10 +47,19 @@ namespace ngraph
/// \brief Constructs a SpaceToDepth operation.
///
/// \param data - Node producing the input tensor
/// \param mode Specifies how the output depth dimension is gathered
/// from block coordinates and the old depth dimension.
/// \param block_size - the size of the block of values to be moved
SpaceToDepth(const Output<Node>& data, std::size_t block_size);
SpaceToDepth(const Output<Node>& data,
const SpaceToDepthMode& mode,
std::size_t block_size = 1);
SpaceToDepth(const Output<Node>& data,
const std::string& mode,
std::size_t block_size = 1);
std::size_t get_block_size() const { return m_blocksize; }
SpaceToDepthMode get_mode() const { return m_mode; }
virtual NodeVector decompose_op() const override;
virtual std::shared_ptr<Node>
......@@ -50,6 +67,8 @@ namespace ngraph
protected:
std::size_t m_blocksize;
SpaceToDepthMode m_mode;
SpaceToDepthMode mode_from_string(const std::string& mode) const;
};
}
}
......@@ -32,7 +32,10 @@ namespace ngraph
static constexpr NodeTypeInfo type_info{"Maximum", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a maximum operation.
Maximum() = default;
Maximum()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
{
}
/// \brief Constructs a maximum operation.
///
/// \param arg0 Node that produces the first input tensor.
......@@ -62,7 +65,11 @@ namespace ngraph
static constexpr NodeTypeInfo type_info{"Maximum", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a maximum operation.
Maximum() = default;
Maximum()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY)
{
}
/// \brief Constructs a maximum operation.
///
/// \param arg0 Node that produces the first input tensor.
......
......@@ -32,7 +32,10 @@ namespace ngraph
static constexpr NodeTypeInfo type_info{"Minimum", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a minimum operation.
Minimum() = default;
Minimum()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
{
}
/// \brief Constructs a minimum operation.
///
/// \param arg0 Node that produces the first input tensor.
......@@ -62,7 +65,11 @@ namespace ngraph
static constexpr NodeTypeInfo type_info{"Minimum", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a minimum operation.
Minimum() = default;
Minimum()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY)
{
}
/// \brief Constructs a minimum operation.
///
/// \param arg0 Node that produces the first input tensor.
......
......@@ -32,7 +32,10 @@ namespace ngraph
static constexpr NodeTypeInfo type_info{"Multiply", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a multiplication operation.
Multiply() = default;
Multiply()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
{
}
/// \brief Constructs a multiplication operation.
///
/// \param arg0 Node that produces the first input tensor.
......@@ -62,7 +65,11 @@ namespace ngraph
static constexpr NodeTypeInfo type_info{"Multiply", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
/// \brief Constructs a multiplication operation.
Multiply() = default;
Multiply()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY)
{
}
/// \brief Constructs a multiplication operation.
///
/// \param arg0 Node that produces the first input tensor.
......
......@@ -46,7 +46,10 @@ namespace ngraph
NGRAPH_API
static constexpr NodeTypeInfo type_info{"Power", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Power() = default;
Power()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
{
}
/// \brief Constructs an exponentiation operation.
///
/// \param arg0 Node that produces the first input tensor.
......@@ -89,7 +92,11 @@ namespace ngraph
NGRAPH_API
static constexpr NodeTypeInfo type_info{"Power", 1};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Power() = default;
Power()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NUMPY)
{
}
/// \brief Constructs an exponentiation operation.
///
/// \param arg0 Node that produces the first input tensor.
......
......@@ -36,7 +36,7 @@ shared_ptr<Node> op::Relu::copy_with_new_args(const NodeVector& new_args) const
}
op::ReluBackprop::ReluBackprop(shared_ptr<Node> arg, shared_ptr<Node> delta)
: BinaryElementwiseArithmetic(arg, delta)
: BinaryElementwiseArithmetic(arg, delta, AutoBroadcastSpec::NONE)
{
constructor_validate_and_infer_types();
}
......
......@@ -37,7 +37,7 @@ op::Sigmoid::Sigmoid(const Output<Node>& arg)
}
op::SigmoidBackprop::SigmoidBackprop(const Output<Node>& arg, const Output<Node>& delta)
: BinaryElementwiseArithmetic(arg, delta)
: BinaryElementwiseArithmetic(arg, delta, AutoBroadcastSpec::NONE)
{
constructor_validate_and_infer_types();
}
......
......@@ -47,7 +47,11 @@ namespace ngraph
NGRAPH_API
static constexpr NodeTypeInfo type_info{"SigmoidBackprop", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
SigmoidBackprop() = default;
SigmoidBackprop()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
{
}
/// \brief Constructs a SigmoidBackprop operation.
///
/// \param arg Node that produces the Sigmoid forward input tensor.
......
......@@ -29,7 +29,11 @@ namespace ngraph
NGRAPH_API
static constexpr NodeTypeInfo type_info{"Subtract", 0};
const NodeTypeInfo& get_type_info() const override { return type_info; }
Subtract() = default;
Subtract()
: util::BinaryElementwiseArithmetic(AutoBroadcastSpec::NONE)
{
}
/// \brief Constructs a subtraction operation.
///
/// \param arg0 Node that produces the first input tensor.
......
......@@ -19,6 +19,9 @@
using namespace ngraph;
const op::AutoBroadcastSpec op::AutoBroadcastSpec::NUMPY(AutoBroadcastType::NUMPY, 0);
const op::AutoBroadcastSpec op::AutoBroadcastSpec::NONE{AutoBroadcastType::NONE, 0};
namespace ngraph
{
template <>
......
......@@ -20,6 +20,7 @@
#include <ostream>
#include "ngraph/attribute_adapter.hpp"
#include "ngraph/ngraph_visibility.hpp"
#include "ngraph/type.hpp"
namespace ngraph
......@@ -269,6 +270,11 @@ namespace ngraph
{
return a.m_type == m_type && a.m_axis == m_axis;
}
NGRAPH_API
static const AutoBroadcastSpec NUMPY;
NGRAPH_API
static const AutoBroadcastSpec NONE;
};
}
}
......@@ -19,7 +19,8 @@
using namespace std;
using namespace ngraph;
op::util::BinaryElementwiseArithmetic::BinaryElementwiseArithmetic()
op::util::BinaryElementwiseArithmetic::BinaryElementwiseArithmetic(const AutoBroadcastSpec& autob)
: m_autob(autob)
{
}
......
......@@ -54,12 +54,12 @@ namespace ngraph
class BinaryElementwiseArithmetic : public Op
{
protected:
/// \brief Constructs a binary elementwise arithmetic operation.
BinaryElementwiseArithmetic();
BinaryElementwiseArithmetic(const AutoBroadcastSpec& autob);
/// \brief Constructs a binary elementwise arithmetic operation.
BinaryElementwiseArithmetic(const std::shared_ptr<Node>& arg0,
const std::shared_ptr<Node>& arg1,
const AutoBroadcastSpec& autob = AutoBroadcastSpec());
const AutoBroadcastSpec& autob);
/// \brief Constructs a binary elementwise arithmetic operation.
///
......@@ -67,7 +67,7 @@ namespace ngraph
/// \param arg1 Output that produces the second input tensor.
BinaryElementwiseArithmetic(const Output<Node>& arg0,
const Output<Node>& arg1,
const AutoBroadcastSpec& autob = AutoBroadcastSpec());
const AutoBroadcastSpec& autob);
/// \brief Constructs a binary elementwise arithmetic operation.
///
......@@ -77,7 +77,7 @@ namespace ngraph
BinaryElementwiseArithmetic(const std::string& node_type,
const std::shared_ptr<Node>& arg0,
const std::shared_ptr<Node>& arg1,
const AutoBroadcastSpec& autob = AutoBroadcastSpec());
const AutoBroadcastSpec& autob);
public:
void validate_and_infer_types() override;
......
......@@ -22,7 +22,7 @@ using namespace ngraph;
constexpr NodeTypeInfo op::GeluBackprop::type_info;
op::GeluBackprop::GeluBackprop(const Output<ngraph::Node>& arg, const Output<ngraph::Node>& delta)
: BinaryElementwiseArithmetic(arg, delta)
: BinaryElementwiseArithmetic(arg, delta, AutoBroadcastSpec::NONE)
{
constructor_validate_and_infer_types();
set_output_size(1);
......
......@@ -2595,7 +2595,8 @@ shared_ptr<Node> JSONDeserializer::deserialize_node(json node_js)
case OP_TYPEID::SpaceToDepth:
{
auto block_size = node_js.at("block_size").get<size_t>();
node = make_shared<op::SpaceToDepth>(args[0], block_size);
auto mode = node_js.at("mode").get<op::SpaceToDepth::SpaceToDepthMode>();
node = make_shared<op::SpaceToDepth>(args[0], mode, block_size);
break;
}
case OP_TYPEID::Split:
......@@ -4069,6 +4070,7 @@ json JSONSerializer::serialize_node(const Node& n)
{
auto tmp = static_cast<const op::SpaceToDepth*>(&n);
node["type"] = write_element_type(tmp->get_element_type());
node["mode"] = tmp->get_mode();
node["block_size"] = tmp->get_block_size();
break;
}
......
......@@ -573,10 +573,11 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_groups_included_in_shape)
EXPECT_EQ(expected, read_vector<float>(result0));
}
NGRAPH_TEST(${BACKEND_NAME}, space_to_depth)
NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_block_first)
{
auto A = make_shared<op::Parameter>(element::f32, Shape{1, 2, 4, 4});
auto space_to_depth = make_shared<op::SpaceToDepth>(A, 2);
const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
auto space_to_depth = make_shared<op::SpaceToDepth>(A, mode, 2);
auto function = make_shared<Function>(NodeVector{space_to_depth}, ParameterVector{A});
auto test_case = test::NgraphTestCase(function, "${BACKEND_NAME}");
......@@ -593,6 +594,24 @@ NGRAPH_TEST(${BACKEND_NAME}, space_to_depth)
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_depth_first)
{
auto A = make_shared<op::Parameter>(element::f32, Shape{1, 2, 4, 4});
const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST;
auto space_to_depth = make_shared<op::SpaceToDepth>(A, mode, 2);
auto function = make_shared<Function>(NodeVector{space_to_depth}, ParameterVector{A});
auto test_case = test::NgraphTestCase(function, "${BACKEND_NAME}");
test_case.add_input<float>({0.f, 16.f, 2.f, 18.f, 1.f, 17.f, 3.f, 19.f, 8.f, 24.f, 10.f,
26.f, 9.f, 25.f, 11.f, 27.f, 4.f, 20.f, 6.f, 22.f, 5.f, 21.f,
7.f, 23.f, 12.f, 28.f, 14.f, 30.f, 13.f, 29.f, 15.f, 31.f});
test_case.add_expected_output<float>(
Shape{1, 8, 2, 2}, {0.f, 2.f, 8.f, 10.f, 16.f, 18.f, 24.f, 26.f, 1.f, 3.f, 9.f,
11.f, 17.f, 19.f, 25.f, 27.f, 4.f, 6.f, 12.f, 14.f, 20.f, 22.f,
28.f, 30.f, 5.f, 7.f, 13.f, 15.f, 21.f, 23.f, 29.f, 31.f});
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_block_first)
{
auto A = make_shared<op::Parameter>(element::f32, Shape{1, 8, 2, 2});
......
......@@ -760,3 +760,25 @@ TEST(serialize, depth_to_space)
EXPECT_EQ(depth_to_space_out->get_block_size(), block_size);
EXPECT_EQ(depth_to_space_out->get_mode(), mode);
}
TEST(serialize, space_to_depth)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{4, 6, 8});
auto mode = op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
size_t block_size = 2;
auto space_to_depth_in = make_shared<op::SpaceToDepth>(arg, mode, block_size);
auto result = make_shared<op::Result>(space_to_depth_in);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
auto g_result = g->get_results().at(0);
auto g_space_to_depth = g_result->input(0).get_source_output().get_node_shared_ptr();
auto depth_to_space_out = as_type_ptr<op::SpaceToDepth>(g_space_to_depth);
EXPECT_EQ(depth_to_space_out->description(), "SpaceToDepth");
EXPECT_EQ(depth_to_space_out->get_version(), 0);
EXPECT_EQ(depth_to_space_out->get_block_size(), block_size);
EXPECT_EQ(depth_to_space_out->get_mode(), mode);
}
......@@ -24,8 +24,28 @@ using namespace ngraph;
TEST(type_prop, space_to_depth)
{
auto A = make_shared<op::Parameter>(element::f32, Shape{1, 2, 64, 64});
auto space_to_depth = make_shared<op::SpaceToDepth>(A, 8);
const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
auto space_to_depth = make_shared<op::SpaceToDepth>(A, mode, 8);
ASSERT_EQ(space_to_depth->get_element_type(), element::f32);
ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 128, 8, 8}));
}
TEST(type_prop, space_to_depth_input_rank_not_supported)
{
auto A = make_shared<op::Parameter>(element::f32, Shape{1, 8, 8, 8, 4});
try
{
auto space_to_depth =
make_shared<op::SpaceToDepth>(A, op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, 2);
FAIL() << "Unsupported input shape for SpaceToDepth: expected exception not thrown";
}
catch (const ngraph_error& error)
{
EXPECT_HAS_SUBSTRING(error.what(), "The provided tensor shape: ");
}
catch (...)
{
FAIL() << "SpaceToDepth decomposition failed for unexpected reason";
}
}