Unverified commit 68ef3faa authored by Adam Procter, committed by GitHub

Generalized constant-padding op (#383)

parent 0f836183
@@ -57,6 +57,7 @@ set (SRC
ops/not.cpp
ops/one_hot.cpp
ops/op.cpp
ops/pad.cpp
ops/parameter.cpp
ops/power.cpp
ops/reduce.cpp
......
@@ -109,8 +109,9 @@ CoordinateTransform::CoordinateTransform(const Shape& source_shape,
for (size_t i = 0; i < m_n_axes; i++)
{
-        if (source_end_corner[i] > (source_shape[i] - 1) * target_dilation_strides[i] + 1 +
-                                       target_padding_below[i] + target_padding_above[i])
+        if (source_end_corner[i] >
+            subtract_or_zero(source_shape[i], size_t(1)) * target_dilation_strides[i] + 1 +
+                target_padding_below[i] + target_padding_above[i])
{
std::stringstream ss;
@@ -334,8 +335,9 @@ bool CoordinateTransform::has_source_coordinate(const Coordinate& c_target) const
size_t pos_depadded = pos_deshifted - m_target_padding_below[target_axis];
// If we are in the above-padding, we have no source coordinate.
-    if (pos_depadded >=
-        ((m_source_shape[source_axis] - 1) * m_target_dilation_strides[target_axis]) + 1)
+    if (m_source_shape[source_axis] == 0 ||
+        (pos_depadded >=
+         ((m_source_shape[source_axis] - 1) * m_target_dilation_strides[target_axis]) + 1))
{
return false;
}
......
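Both hunks above guard against the same pitfall: `Shape` extents are unsigned, so when an axis has extent 0 the old expression `(source_shape[i] - 1)` wraps around rather than going negative. A minimal standalone illustration (not part of the diff; `subtract_or_zero` is copied from the util.hpp hunk later in this commit):

```
#include <cassert>
#include <cstddef>

// Copied from the util.hpp hunk below: clamps at zero instead of wrapping.
template <typename T>
T subtract_or_zero(T x, T y)
{
    return y > x ? 0 : x - y;
}

int main()
{
    size_t d = 0;
    size_t stride = 2;
    assert((d - 1) * stride == static_cast<size_t>(-2));  // wrapped: near SIZE_MAX
    assert(subtract_or_zero(d, size_t(1)) * stride == 0); // clamped, as the new checks need
}
```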
@@ -97,6 +97,7 @@
#include "ngraph/ops/not_equal.hpp"
#include "ngraph/ops/one_hot.hpp"
#include "ngraph/ops/op.hpp"
#include "ngraph/ops/pad.hpp"
#include "ngraph/ops/parameter.hpp"
#include "ngraph/ops/power.hpp"
#include "ngraph/ops/reduce.hpp"
......
@@ -124,7 +124,7 @@ namespace ngraph
/// \return The padding-below sizes.
const Shape& get_padding_below() const { return m_padding_below; }
/// \return The padding-above sizes.
-            const Strides& get_padding_above() const { return m_padding_above; }
+            const Shape& get_padding_above() const { return m_padding_above; }
/// \return The input image dilation strides.
const Strides& get_image_dilation_strides() const { return m_image_dilation_strides; }
/// \return The number of input channels.
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#include "ngraph/ops/pad.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
op::Pad::Pad(const std::shared_ptr<Node>& arg,
const std::shared_ptr<Node>& arg_pad_value,
const Shape& padding_below,
const Shape& padding_above,
const Shape& padding_interior)
: RequiresTensorViewArgs("Pad", {arg, arg_pad_value})
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_padding_interior(padding_interior)
{
if (get_input_element_type(0) != get_input_element_type(1))
{
throw ngraph_error("Pad argument tensor and padding value element types do not match");
}
if (get_input_shape(1) != Shape{})
{
throw ngraph_error("Padding value for pad is not a scalar");
}
auto arg_shape = get_input_shape(0);
if (arg_shape.size() != padding_below.size())
{
throw ngraph_error("Pad rank for below-padding does not match rank of argument tensor");
}
if (arg_shape.size() != padding_above.size())
{
throw ngraph_error("Pad rank for above-padding does not match rank of argument tensor");
}
if (arg_shape.size() != padding_interior.size())
{
throw ngraph_error("Pad rank for interior padding does not match rank of argument tensor");
}
Shape result_shape;
for (size_t i = 0; i < arg_shape.size(); i++)
{
result_shape.push_back(
padding_below[i] +
subtract_or_zero(arg_shape[i] * (padding_interior[i] + 1), padding_interior[i]) +
padding_above[i]);
}
set_value_type_checked(get_input_element_type(0), result_shape);
}
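To make the shape rule above concrete, here is a standalone check of the `result_shape` loop, using values from the 2D `type_prop` test later in this diff (the `padded_extent` helper is hypothetical, written only for this sketch):

```
#include <cassert>
#include <cstddef>

size_t subtract_or_zero(size_t x, size_t y) { return y > x ? 0 : x - y; }

// Hypothetical helper mirroring the loop body in the Pad constructor above.
size_t padded_extent(size_t d, size_t below, size_t above, size_t interior)
{
    return below + subtract_or_zero(d * (interior + 1), interior) + above;
}

int main()
{
    assert(padded_extent(50, 5, 6, 2) == 159); // p + (d-1)(r+1) + 1 + q
    assert(padded_extent(40, 3, 9, 3) == 169);
    assert(padded_extent(0, 5, 6, 2) == 11);   // empty axis collapses to p + q
}
```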
std::shared_ptr<Node>
op::Pad::copy_with_new_args(const std::vector<std::shared_ptr<Node>>& new_args) const
{
if (new_args.size() != 2)
{
throw ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<Pad>(
new_args.at(0), new_args.at(1), m_padding_below, m_padding_above, m_padding_interior);
}
bool op::Pad::is_functionally_identical(const Node& other) const
{
bool rc = true;
if (Node::is_functionally_identical(other))
{
const Pad& rhs = dynamic_cast<const Pad&>(other);
rc &= m_padding_below == rhs.m_padding_below;
rc &= m_padding_above == rhs.m_padding_above;
rc &= m_padding_interior == rhs.m_padding_interior;
}
else
{
rc = false;
}
return rc;
}
/* The "y" half of this is going to be a bit tricky... best way to handle it, I think,
is to ReplaceSlice the non-padded values in the incoming delta tensor with a zero
broadcasted to x's shape; then sum that and backprop the result to y.
For example, let's say we are padding a 2x2 with 1 above, below, and interior, and
the deltas coming back are:
d00 d01 d02 d03 d04
d10 d11 d12 d13 d14
d20 d21 d22 d23 d24
d30 d31 d32 d33 d34
d40 d41 d42 d43 d44
We know that everything but d11, d13, d31, and d33 on the forward prop is just "y".
So we mask that off (using the forward-prop padding values to determine start, end,
and slice stride):
d00 d01 d02 d03 d04
d10 0 d12 0 d14
d20 d21 d22 d23 d24
d30 0 d32 0 d34
d40 d41 d42 d43 d44
Then sum that up:
d00 + d01 + d02 + d03 + d04 +
d10 + 0 + d12 + 0 + d14 +
d20 + d21 + d22 + d23 + d24 +
d30 + 0 + d32 + 0 + d34 +
d40 + d41 + d42 + d43 + d44
For the "x" backprop it's sort of the opposite; just slice out:
d11 d13
d31 d33
and push that back.
*/
void op::Pad::generate_adjoints(autodiff::Adjoints& adjoints, const std::shared_ptr<Node>& delta)
{
throw std::invalid_argument("Autodiff is not yet implemented for Pad");
}
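Since the adjoint logic is only sketched in the comment above, here is a standalone numeric check of its key claim: with a 2x2 input padded by 1 below, above, and interior on each axis, the source values land exactly at output coordinates {1, 3} x {1, 3} (i.e. d11, d13, d31, d33), which is what the proposed slice and mask would target.

```
#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

int main()
{
    size_t below = 1, interior = 1, d = 2;
    std::vector<std::pair<size_t, size_t>> source_positions;
    for (size_t i = 0; i < d; i++)
    {
        for (size_t j = 0; j < d; j++)
        {
            // Source element (i,j) lands at below + index * (interior + 1).
            source_positions.emplace_back(below + i * (interior + 1),
                                          below + j * (interior + 1));
        }
    }
    std::vector<std::pair<size_t, size_t>> expected{{1, 1}, {1, 3}, {3, 1}, {3, 3}};
    assert(source_positions == expected);
}
```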
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Generic constant-padding operation.
///
/// Takes an input tensor of shape \f$(d_1,\dots,d_n)\f$ and pads by inserting a scalar \f$x\f$ supplied as input, in three possible ways:
///
/// 1. <i>(exterior padding)</i> inserts copies of \f$x\f$ <i>below or above</i> the bounds of existing rows, columns, etc.,
/// 2. <i>(interior padding)</i> inserts copies of \f$x\f$ <i>between</i> rows, columns, etc., or
/// 3. both of the above.
///
/// The number and position of elements to be inserted along a given axis is determined by three parameters:
///
/// 1. <i>(the padding-below sizes)</i> a vector of non-negative integers \f$(p_1,\dots,p_n)\f$,
/// 2. <i>(the padding-above sizes)</i> a vector of non-negative integers \f$(q_1,\dots,q_n)\f$, and
/// 3. <i>(the interior padding sizes)</i> a vector of non-negative integers \f$(r_1,\dots,r_n)\f$.
///
/// The output tensor will have the shape \f$(d'_1,\dots,d'_n)\f$ where \f$d'_i = p_i + (d_i - 1)(r_i + 1) + 1 + q_i\f$ if \f$d_i > 0\f$, and \f$d'_i = p_i + q_i\f$ if \f$d_i = 0\f$.
///
/// Example: given a 3x3 tensor, with interior-padding sizes of `{1,2}`, padding-below of `{1,2}`, padding-above of `{1,0}`, and a pad-value of `42`, we obtain:
///
/// ```
/// 42 42 42 42 42 42 42 42 42
/// 42 42 1 42 42 2 42 42 3
/// 1 2 3 42 42 42 42 42 42 42 42 42
/// 4 5 6 --> 42 42 4 42 42 5 42 42 6
/// 7 8 9 42 42 42 42 42 42 42 42 42
/// 42 42 7 42 42 8 42 42 9
/// 42 42 42 42 42 42 42 42 42
/// ```
///
/// In other words we have inserted one new row between each pair of adjacent rows, two new columns between each pair of adjacent columns, one new row at
/// the top and two new columns on the left, and one new row at the bottom and zero new columns on the right; then filled the new rows and columns with `42`.
///
/// (Note that `below` and `above` here refer respectively to lower- or higher-numbered coordinate indices, and numbering starts at the upper-left corner;
/// thus inserting a row "below" actually inserts it at the "top" of the matrix.)
///
class Pad : public RequiresTensorViewArgs
{
public:
/// \brief Constructs a generic padding operation.
///
/// \param arg The node producing input tensor to be padded.
/// \param arg_pad_value The node producing the scalar value to be inserted for padding.
/// \param padding_below The padding-below widths.
/// \param padding_above The padding-above widths.
/// \param padding_interior The interior-padding widths.
Pad(const std::shared_ptr<Node>& arg,
const std::shared_ptr<Node>& arg_pad_value,
const Shape& padding_below,
const Shape& padding_above,
const Shape& padding_interior);
virtual std::shared_ptr<Node> copy_with_new_args(
const std::vector<std::shared_ptr<Node>>& new_args) const override;
/// \return The padding-below sizes.
const Shape& get_padding_below() const { return m_padding_below; }
/// \return The padding-above sizes.
const Shape& get_padding_above() const { return m_padding_above; }
/// \return The interior padding sizes.
const Shape& get_padding_interior() const { return m_padding_interior; }
bool is_functionally_identical(const Node&) const override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
Shape m_padding_below;
Shape m_padding_above;
Shape m_padding_interior;
};
}
}
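A minimal usage sketch for the documented 3x3 example, assuming the usual ngraph `using` declarations from the tests later in this diff; the resulting shape matches the 7x9 grid shown above:

```
auto arg = std::make_shared<op::Parameter>(element::f32, Shape{3, 3});
auto pad_value = std::make_shared<op::Parameter>(element::f32, Shape{}); // scalar, 42 at runtime
auto padded = std::make_shared<op::Pad>(arg,
                                        pad_value,
                                        Shape{1, 2},  // padding below
                                        Shape{1, 0},  // padding above
                                        Shape{1, 2}); // interior padding
// padded->get_shape() == Shape{7, 9}
```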
@@ -31,6 +31,7 @@
#include "ngraph/ops/get_output_element.hpp"
#include "ngraph/ops/max_pool.hpp"
#include "ngraph/ops/one_hot.hpp"
#include "ngraph/ops/pad.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/reduce_window.hpp"
#include "ngraph/ops/replace_slice.hpp"
@@ -1990,6 +1991,26 @@ void runtime::cpu::CPU_Emitter::EmitAvgPool(codegen::CodeWriter& writer,
writer << " {" << join(avg_pool->get_padding_above()) << "});\n";
}
void runtime::cpu::CPU_Emitter::EmitPad(codegen::CodeWriter& writer,
const ngraph::Node* n,
const vector<runtime::cpu::TensorViewWrapper>& args,
const vector<runtime::cpu::TensorViewWrapper>& out)
{
auto pad = static_cast<const op::Pad*>(n);
auto arg0_shape = args[0].get_shape();
auto result_shape = out[0].get_shape();
writer << "kernel::pad<" << out[0].get_type() << ">(" << args[0].get_name() << ",\n";
writer << " " << args[1].get_name() << ",\n";
writer << " " << out[0].get_name() << ",\n";
writer << " {" << join(arg0_shape) << "},\n";
writer << " {" << join(result_shape) << "},\n";
writer << " {" << join(pad->get_padding_below()) << "},\n";
writer << " {" << join(pad->get_padding_above()) << "},\n";
writer << " {" << join(pad->get_padding_interior()) << "});\n";
}
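For the 50x40 `type_prop` test case later in this diff, the emitted code would take roughly the following shape (variable names here are illustrative; the real ones come from the tensor-view wrappers):

```
kernel::pad<float>(arg0,        // input tensor data
                   arg1,        // scalar pad value
                   out0,        // output tensor data
                   {50, 40},    // arg0 shape
                   {159, 169},  // result shape
                   {5, 3},      // padding below
                   {6, 9},      // padding above
                   {2, 3});     // interior padding
```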
//------------------------------------------------------------------------------------------------
// Utility methods
//------------------------------------------------------------------------------------------------
......
@@ -91,6 +91,7 @@ namespace ngraph
static void EMITTER_DECL(EmitReduceWindow);
static void EMITTER_DECL(EmitSelectAndScatter);
static void EMITTER_DECL(EmitAvgPool);
static void EMITTER_DECL(EmitPad);
private:
static std::string emit_vector(const TensorViewWrapper&,
......
@@ -65,6 +65,7 @@
#include "ngraph/ops/not.hpp"
#include "ngraph/ops/not_equal.hpp"
#include "ngraph/ops/one_hot.hpp"
#include "ngraph/ops/pad.hpp"
#include "ngraph/ops/power.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/reduce_window.hpp"
@@ -191,6 +192,7 @@ static const runtime::cpu::OpMap dispatcher{
{TI(ngraph::op::ReduceWindow), &runtime::cpu::CPU_Emitter::EmitReduceWindow},
{TI(ngraph::op::SelectAndScatter), &runtime::cpu::CPU_Emitter::EmitSelectAndScatter},
{TI(ngraph::op::AvgPool), &runtime::cpu::CPU_Emitter::EmitAvgPool},
{TI(ngraph::op::Pad), &runtime::cpu::CPU_Emitter::EmitPad},
};
runtime::cpu::CPU_ExternalFunction::CPU_ExternalFunction(
@@ -241,6 +243,7 @@ void runtime::cpu::CPU_ExternalFunction::compile()
#include "ngraph/runtime/kernel/max_pool.hpp"
#include "ngraph/runtime/kernel/not.hpp"
#include "ngraph/runtime/kernel/one_hot.hpp"
#include "ngraph/runtime/kernel/pad.hpp"
#include "ngraph/runtime/kernel/reduce.hpp"
#include "ngraph/runtime/kernel/reduce_window.hpp"
#include "ngraph/runtime/kernel/replace_slice.hpp"
......
@@ -29,6 +29,7 @@
#include "ngraph/ops/dot.hpp"
#include "ngraph/ops/max_pool.hpp"
#include "ngraph/ops/one_hot.hpp"
#include "ngraph/ops/pad.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/reduce_window.hpp"
#include "ngraph/ops/replace_slice.hpp"
@@ -73,6 +74,7 @@
#include "ngraph/runtime/kernel/not.hpp"
#include "ngraph/runtime/kernel/not_equal.hpp"
#include "ngraph/runtime/kernel/one_hot.hpp"
#include "ngraph/runtime/kernel/pad.hpp"
#include "ngraph/runtime/kernel/power.hpp"
#include "ngraph/runtime/kernel/reduce.hpp"
#include "ngraph/runtime/kernel/reduce_window.hpp"
@@ -476,6 +478,19 @@ private:
else if (node_op == "Parameter")
{
}
else if (node_op == "Pad")
{
ngraph::op::Pad* pad = dynamic_cast<ngraph::op::Pad*>(&node);
kernel::pad(reinterpret_cast<T*>(args[0]->get_data_ptr()),
reinterpret_cast<T*>(args[1]->get_data_ptr()),
reinterpret_cast<T*>(out[0]->get_data_ptr()),
node.get_inputs().at(0).get_shape(),
node.get_output_shape(0),
pad->get_padding_below(),
pad->get_padding_above(),
pad->get_padding_interior());
}
else if (node_op == "Power")
{
kernel::power<T>(reinterpret_cast<T*>(args[0]->get_data_ptr()),
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include <cmath>
#include "ngraph/common.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/util.hpp"
namespace ngraph
{
namespace runtime
{
namespace kernel
{
template <typename T>
void pad(T* arg0,
T* arg1,
T* out,
const Shape& arg0_shape,
const Shape& out_shape,
const Shape& padding_below,
const Shape& padding_above,
const Shape& padding_interior)
{
Coordinate input_start(arg0_shape.size(), 0); // start at (0,0,...,0)
Coordinate input_end =
out_shape; // end at (d'0,d'1,...,d'n), the outer corner of the post-padding shape
Strides input_strides(arg0_shape.size(), 1);
AxisVector input_axis_order(arg0_shape.size());
for (size_t i = 0; i < arg0_shape.size(); i++)
{
input_axis_order[i] = i;
}
Shape input_dilation(arg0_shape.size());
for (size_t i = 0; i < arg0_shape.size(); i++)
{
input_dilation[i] = padding_interior[i] + 1;
}
CoordinateTransform input_transform(arg0_shape,
input_start,
input_end,
input_strides,
input_axis_order,
padding_below,
padding_above,
input_dilation);
CoordinateTransform output_transform(out_shape);
CoordinateTransform::Iterator output_it = output_transform.begin();
for (const Coordinate& in_coord : input_transform)
{
const Coordinate& out_coord = *output_it;
T v = input_transform.has_source_coordinate(in_coord)
? arg0[input_transform.index(in_coord)]
: *arg1;
out[output_transform.index(out_coord)] = v;
++output_it;
}
}
}
}
}
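As a self-contained check of what the kernel computes, the following 1D sketch reproduces its fill rule directly: source element `i` lands at `below + i * (interior + 1)`, and every other output slot takes the pad value.

```
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    std::vector<float> arg{1, 2, 3};
    float pad_value = 42;
    size_t below = 1, above = 2, interior = 1;

    // d' = p + (d - 1)(r + 1) + 1 + q; arg is non-empty here, so no clamping needed.
    size_t out_size = below + (arg.size() - 1) * (interior + 1) + 1 + above;
    std::vector<float> out(out_size, pad_value);
    for (size_t i = 0; i < arg.size(); i++)
    {
        out[below + i * (interior + 1)] = arg[i];
    }
    assert((out == std::vector<float>{42, 1, 42, 2, 42, 3, 42, 42}));
}
```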
@@ -237,6 +237,12 @@ namespace ngraph
return (x == 0 ? 0 : (1 + (x - 1) / y));
}
template <typename T>
T subtract_or_zero(T x, T y)
{
return y > x ? 0 : x - y;
}
void* aligned_alloc(size_t alignment, size_t size);
void aligned_free(void*);
size_t round_up(size_t size, size_t alignment);
......
@@ -5573,3 +5573,223 @@ TEST(type_prop, avg_pool_invalid_movement_stride_0)
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, pad_deduce_1d_exterior)
{
// Deduce type
auto param0 = make_shared<op::Parameter>(element::f32, Shape{50});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{});
auto padding_below = Shape{2};
auto padding_above = Shape{3};
auto padding_interior = Shape{0};
auto pad = make_shared<op::Pad>(param0, param1, padding_below, padding_above, padding_interior);
EXPECT_EQ(pad->get_element_type(), element::f32);
EXPECT_EQ(pad->get_shape(), (Shape{55}));
EXPECT_EQ(pad->get_padding_below(), (Shape{2}));
EXPECT_EQ(pad->get_padding_above(), (Shape{3}));
EXPECT_EQ(pad->get_padding_interior(), (Shape{0}));
}
TEST(type_prop, pad_deduce_1d_interior)
{
// Deduce type
auto param0 = make_shared<op::Parameter>(element::f32, Shape{50});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{});
auto padding_below = Shape{0};
auto padding_above = Shape{0};
auto padding_interior = Shape{2};
auto pad = make_shared<op::Pad>(param0, param1, padding_below, padding_above, padding_interior);
EXPECT_EQ(pad->get_element_type(), element::f32);
EXPECT_EQ(pad->get_shape(), (Shape{148}));
EXPECT_EQ(pad->get_padding_below(), (Shape{0}));
EXPECT_EQ(pad->get_padding_above(), (Shape{0}));
EXPECT_EQ(pad->get_padding_interior(), (Shape{2}));
}
TEST(type_prop, pad_deduce_1d_interior_exterior)
{
// Deduce type
auto param0 = make_shared<op::Parameter>(element::f32, Shape{50});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{});
auto padding_below = Shape{5};
auto padding_above = Shape{6};
auto padding_interior = Shape{2};
auto pad = make_shared<op::Pad>(param0, param1, padding_below, padding_above, padding_interior);
EXPECT_EQ(pad->get_element_type(), element::f32);
EXPECT_EQ(pad->get_shape(), (Shape{159}));
EXPECT_EQ(pad->get_padding_below(), (Shape{5}));
EXPECT_EQ(pad->get_padding_above(), (Shape{6}));
EXPECT_EQ(pad->get_padding_interior(), (Shape{2}));
}
TEST(type_prop, pad_deduce_2d_interior_exterior)
{
// Deduce type
auto param0 = make_shared<op::Parameter>(element::f32, Shape{50, 40});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{});
auto padding_below = Shape{5, 3};
auto padding_above = Shape{6, 9};
auto padding_interior = Shape{2, 3};
auto pad = make_shared<op::Pad>(param0, param1, padding_below, padding_above, padding_interior);
EXPECT_EQ(pad->get_element_type(), element::f32);
EXPECT_EQ(pad->get_shape(), (Shape{159, 169}));
EXPECT_EQ(pad->get_padding_below(), (Shape{5, 3}));
EXPECT_EQ(pad->get_padding_above(), (Shape{6, 9}));
EXPECT_EQ(pad->get_padding_interior(), (Shape{2, 3}));
}
TEST(type_prop, pad_deduce_3d_interior_exterior)
{
// Deduce type
auto param0 = make_shared<op::Parameter>(element::f32, Shape{50, 40, 20});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{});
auto padding_below = Shape{5, 3, 0};
auto padding_above = Shape{6, 9, 4};
auto padding_interior = Shape{2, 3, 0};
auto pad = make_shared<op::Pad>(param0, param1, padding_below, padding_above, padding_interior);
EXPECT_EQ(pad->get_element_type(), element::f32);
EXPECT_EQ(pad->get_shape(), (Shape{159, 169, 24}));
EXPECT_EQ(pad->get_padding_below(), (Shape{5, 3, 0}));
EXPECT_EQ(pad->get_padding_above(), (Shape{6, 9, 4}));
EXPECT_EQ(pad->get_padding_interior(), (Shape{2, 3, 0}));
}
TEST(type_prop, pad_deduce_element_type_mismatch)
{
// Deduce type
auto param0 = make_shared<op::Parameter>(element::f32, Shape{50, 40, 20});
auto param1 = make_shared<op::Parameter>(element::i32, Shape{});
auto padding_below = Shape{5, 3, 0};
auto padding_above = Shape{6, 9, 4};
auto padding_interior = Shape{2, 3, 0};
try
{
auto pad =
make_shared<op::Pad>(param0, param1, padding_below, padding_above, padding_interior);
// Should have thrown, so fail if it didn't
FAIL() << "Element tpye mismatch not detected";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(),
std::string("Pad argument tensor and padding value element types do not match"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, pad_deduce_nonscalar_pad_value)
{
// Deduce type
auto param0 = make_shared<op::Parameter>(element::f32, Shape{50, 40, 20});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{6});
auto padding_below = Shape{5, 3, 0};
auto padding_above = Shape{6, 9, 4};
auto padding_interior = Shape{2, 3, 0};
try
{
auto pad =
make_shared<op::Pad>(param0, param1, padding_below, padding_above, padding_interior);
// Should have thrown, so fail if it didn't
FAIL() << "Non-scalar pad value not detected";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(), std::string("Padding value for pad is not a scalar"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, pad_deduce_below_padding_wrong_rank)
{
// Deduce type
auto param0 = make_shared<op::Parameter>(element::f32, Shape{50, 40, 20});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{});
auto padding_below = Shape{5, 3, 0, 6};
auto padding_above = Shape{6, 9, 4};
auto padding_interior = Shape{2, 3, 0};
try
{
auto pad =
make_shared<op::Pad>(param0, param1, padding_below, padding_above, padding_interior);
// Should have thrown, so fail if it didn't
FAIL() << "Wrong below-padding rank not detected";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(),
std::string("Pad rank for below-padding does not match rank of argument tensor"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, pad_deduce_above_padding_wrong_rank)
{
// Deduce type
auto param0 = make_shared<op::Parameter>(element::f32, Shape{50, 40, 20});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{});
auto padding_below = Shape{5, 3, 0};
auto padding_above = Shape{6, 9};
auto padding_interior = Shape{2, 3, 0};
try
{
auto pad =
make_shared<op::Pad>(param0, param1, padding_below, padding_above, padding_interior);
// Should have thrown, so fail if it didn't
FAIL() << "Wrong above-padding rank not detected";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(error.what(),
std::string("Pad rank for above-padding does not match rank of argument tensor"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, pad_deduce_interior_padding_wrong_rank)
{
// Deduce type
auto param0 = make_shared<op::Parameter>(element::f32, Shape{50, 40, 20});
auto param1 = make_shared<op::Parameter>(element::f32, Shape{});
auto padding_below = Shape{5, 3, 0};
auto padding_above = Shape{6, 9, 4};
auto padding_interior = Shape{2, 3, 0, 9, 3};
try
{
auto pad =
make_shared<op::Pad>(param0, param1, padding_below, padding_above, padding_interior);
// Should have thrown, so fail if it didn't
FAIL() << "Wrong interior padding rank not detected";
}
catch (const ngraph_error& error)
{
EXPECT_EQ(
error.what(),
std::string("Pad rank for interior padding does not match rank of argument tensor"));
}
catch (...)
{
FAIL() << "Deduced type check failed for unexpected reason";
}
}