Unverified commit 9264bc16 authored by Nick Korovaiko, committed by GitHub

bprop for MaxPool (#391)

parent d43a0557
@@ -13,6 +13,11 @@
// ----------------------------------------------------------------------------
#include "ngraph/ops/max_pool.hpp"
#include "ngraph/function.hpp"
#include "ngraph/ops/add.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/greater.hpp"
#include "ngraph/ops/select_and_scatter.hpp"
#include "ngraph/util.hpp"
using namespace std;
@@ -168,8 +173,40 @@ bool op::MaxPool::is_functionally_identical(const Node& other) const
    return rc;
}
void op::MaxPool::generate_adjoints(autodiff::Adjoints& adjoints,
                                    const std::shared_ptr<Node>& delta)
{
    auto etype = delta->get_element_type();

    // Selection function: a scalar Greater comparison picks the max element
    // of each pooling window.
    auto shape_sel_a = Shape{};
    auto SEL_A = make_shared<op::Parameter>(etype, shape_sel_a);
    auto shape_sel_b = Shape{};
    auto SEL_B = make_shared<op::Parameter>(etype, shape_sel_b);
    auto sel_f = std::make_shared<Function>(std::make_shared<op::Greater>(SEL_A, SEL_B),
                                            op::Parameters{SEL_A, SEL_B});

    // Scatter function: a scalar Add accumulates the deltas that land on the
    // same input cell when pooling windows overlap.
    auto shape_scatter_a = Shape{};
    auto SCATTER_A = make_shared<op::Parameter>(etype, shape_scatter_a);
    auto shape_scatter_b = Shape{};
    auto SCATTER_B = make_shared<op::Parameter>(etype, shape_scatter_b);
    auto scatter_f =
        make_shared<Function>(SCATTER_A + SCATTER_B, op::Parameters{SCATTER_A, SCATTER_B});

    auto operand = get_input_op(0);
    auto init_value =
        std::make_shared<op::Constant>(etype, Shape{}, std::vector<std::string>({"0"}));

    // The pooling window and strides cover only the two spatial dimensions;
    // extend them with 1s over the batch and channel dimensions.
    Strides strides{1, 1};
    strides.push_back(m_window_movement_strides.at(0));
    strides.push_back(m_window_movement_strides.at(1));
    Shape shape{1, 1};
    shape.push_back(m_window_shape.at(0));
    shape.push_back(m_window_shape.at(1));

    auto sas = std::make_shared<op::SelectAndScatter>(
        operand, delta, init_value, sel_f, scatter_f, shape, strides);
    adjoints.add_delta(operand, sas);
}
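The SelectAndScatter construction above can be hard to read because the select and scatter functions are scalar graphs. As a rough mental model, the same computation written as plain loops over a single 2-D plane looks like the sketch below. This is a hedged, standalone illustration (the function name and row-major layout are invented for the example), not ngraph code:

#include <cstddef>
#include <vector>

// For each pooling window, find the argmax with the same greater-than rule
// as sel_f, then accumulate the window's delta into that cell as scatter_f
// does; overlapping windows sum their contributions.
std::vector<float> maxpool_bprop_reference(const std::vector<float>& input,
                                           const std::vector<float>& delta,
                                           std::size_t h, std::size_t w,   // input dims
                                           std::size_t wh, std::size_t ww, // window dims
                                           std::size_t sh, std::size_t sw) // strides
{
    std::size_t out_h = (h - wh) / sh + 1;
    std::size_t out_w = (w - ww) / sw + 1;
    std::vector<float> result(h * w, 0.0f); // the "0" init_value
    for (std::size_t oy = 0; oy < out_h; ++oy)
    {
        for (std::size_t ox = 0; ox < out_w; ++ox)
        {
            // Select: Greater-based argmax over the current window
            // (ties resolve to the earliest element, as a > scan does).
            std::size_t best_y = oy * sh;
            std::size_t best_x = ox * sw;
            for (std::size_t ky = 0; ky < wh; ++ky)
            {
                for (std::size_t kx = 0; kx < ww; ++kx)
                {
                    std::size_t y = oy * sh + ky;
                    std::size_t x = ox * sw + kx;
                    if (input[y * w + x] > input[best_y * w + best_x])
                    {
                        best_y = y;
                        best_x = x;
                    }
                }
            }
            // Scatter: Add accumulates the output delta at the selected cell.
            result[best_y * w + best_x] += delta[oy * out_w + ox];
        }
    }
    return result;
}

With window {2, 2} and strides {1, 1} this mirrors the first test below; the 1s prepended to shape and strides in the real code simply make the window a no-op over the batch and channel dimensions.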
@@ -80,6 +80,9 @@ namespace ngraph
            bool is_functionally_identical(const Node&) const override;

        protected:
            virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                           const std::shared_ptr<Node>& delta) override;

            Shape m_window_shape;
            Strides m_window_movement_strides;
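For context, generate_adjoints is the hook ngraph's autodiff machinery calls when building the backward function: given the delta flowing into MaxPool's output, the override above registers (via adjoints.add_delta) the corresponding delta for the pooled input, and autodiff::backprop_function stitches these per-op contributions into the function the tests below compile and run.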
@@ -94,6 +94,103 @@ bool autodiff_numeric_compare_selective(
    return test::all_close(results_num, results_sym, rtol, atol);
}
// Helper: copy host data into a backend tensor view.
template <typename T>
static void copy_data(shared_ptr<runtime::TensorView> tv, const vector<T>& data)
{
    size_t data_size = data.size() * sizeof(T);
    tv->write(data.data(), 0, data_size);
}
TEST(${BACKEND_NAME}, backwards_maxpool_n4_c1_hw4_2x2_max)
{
    auto manager = runtime::Manager::get("${BACKEND_NAME}");
    auto backend = manager->allocate_backend();

    auto shape_a = Shape{1, 4, 4, 4}; // input is laid out as CHWN
    auto maxpool_shape = Shape{1, 4, 3, 3};
    auto A = make_shared<op::Parameter>(element::i32, shape_a);
    auto reshape = make_shared<op::Reshape>(
        A, AxisVector{0, 3, 1, 2}, Shape{1, 4, 4, 4}); // transpose CHWN -> CNHW
    auto window_shape = Shape{2, 2};
    auto window_movement_strides = Strides{1, 1};
    auto maxpool = make_shared<op::MaxPool>(reshape, window_shape, window_movement_strides);
    auto f = make_shared<Function>(maxpool, op::Parameters{A});

    shared_ptr<runtime::TensorView> ep =
        backend->make_primary_tensor_view(element::i32, maxpool_shape);
    vector<int> dataEp(shape_size(maxpool_shape), 4); // every output delta is 4

    shared_ptr<runtime::TensorView> input =
        backend->make_primary_tensor_view(element::i32, shape_a);
    shared_ptr<runtime::TensorView> output =
        backend->make_primary_tensor_view(element::i32, shape_a);

    vector<int> dataInput{11, 65, 44, 28, 31, 33, 21, 66, 40, 49, 69, 57, 47, 30, 24, 27,
                          13, 56, 46, 60, 61, 41, 25, 42, 48, 53, 51, 43, 59, 58, 29, 71,
                          17, 22, 72, 18, 39, 35, 15, 38, 64, 52, 73, 67, 62, 50, 10, 68,
                          45, 63, 16, 14, 55, 54, 37, 20, 36, 12, 70, 34, 19, 26, 32, 23};

    // Expected input deltas: each cell receives 4 for every 2x2 window in
    // which it is the max; with stride 1 the windows overlap, so a cell that
    // wins several windows accumulates a multiple of 4 (e.g. 16 = 4 windows).
    vector<int> expected{0, 4, 0, 0, 0, 0, 0, 8, 0, 0, 8, 0, 0, 0, 0, 0, 0, 4, 4, 4, 12, 0,
                         0, 0, 0, 8, 0, 0, 4, 8, 0, 8, 0, 0, 8, 0, 0, 0, 0, 4, 16, 4, 16, 8,
                         0, 0, 0, 4, 0, 4, 0, 0, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

    copy_data(ep, dataEp);
    copy_data(input, dataInput);

    auto C = make_shared<op::Parameter>(element::i32, maxpool_shape);
    auto df = autodiff::backprop_function(f);
    auto external = manager->compile(df);
    auto cf = backend->make_call_frame(external);
    cf->tensor_call({input, ep}, {output});
    ASSERT_TRUE(output->get_vector<int>() == expected);
}
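One cheap cross-check on the expected vector: max-pool backprop routes each output delta to exactly one input cell, so the input deltas must add up to the total injected delta, 4 per output cell times shape_size(maxpool_shape) = 36 cells, i.e. 144. A standalone sketch of that check (hypothetical, not part of the commit):

#include <cassert>
#include <numeric>
#include <vector>

int main()
{
    // The 64 expected input deltas from the test above.
    std::vector<int> expected{0, 4, 0, 0, 0, 0, 0, 8, 0, 0, 8, 0, 0, 0, 0, 0, 0, 4, 4, 4, 12, 0,
                              0, 0, 0, 8, 0, 0, 4, 8, 0, 8, 0, 0, 8, 0, 0, 0, 0, 4, 16, 4, 16, 8,
                              0, 0, 0, 4, 0, 4, 0, 0, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    // Delta conservation: 4 * 36 output cells = 144.
    assert(std::accumulate(expected.begin(), expected.end(), 0) == 144);
    return 0;
}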
TEST(${BACKEND_NAME}, backwards_maxpool_n2_c1_hw5_3x3_str2_max)
{
    auto manager = runtime::Manager::get("${BACKEND_NAME}");
    auto backend = manager->allocate_backend();

    auto shape_a = Shape{1, 5, 5, 2}; // input is laid out as CHWN
    auto maxpool_shape = Shape{1, 2, 2, 2};
    auto A = make_shared<op::Parameter>(element::i32, shape_a);
    auto reshape = make_shared<op::Reshape>(
        A, AxisVector{0, 3, 1, 2}, Shape{1, 2, 5, 5}); // transpose CHWN -> CNHW
    auto window_shape = Shape{3, 3};
    auto window_movement_strides = Strides{2, 2};
    auto maxpool = make_shared<op::MaxPool>(reshape, window_shape, window_movement_strides);
    auto f = make_shared<Function>(maxpool, op::Parameters{A});

    shared_ptr<runtime::TensorView> ep =
        backend->make_primary_tensor_view(element::i32, maxpool_shape);
    vector<int> dataEp(shape_size(maxpool_shape), 4); // every output delta is 4

    shared_ptr<runtime::TensorView> input =
        backend->make_primary_tensor_view(element::i32, shape_a);
    shared_ptr<runtime::TensorView> output =
        backend->make_primary_tensor_view(element::i32, shape_a);

    vector<int> dataInput{58, 15, 51, 35, 18, 47, 31, 32, 52, 21, 36, 38, 57, 54, 25, 45, 23,
                          30, 16, 27, 48, 20, 41, 37, 43, 39, 22, 28, 33, 29, 12, 17, 44, 42,
                          19, 40, 10, 46, 34, 53, 26, 55, 50, 13, 24, 14, 49, 56, 59, 11};

    // Expected input deltas: although adjacent 3x3 windows share a row and a
    // column at stride 2, no cell here is the max of more than one window, so
    // every nonzero entry is exactly 4.
    vector<int> expected{4, 0, 0, 0, 0, 4, 0, 0, 4, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 4, 4, 0};

    copy_data(ep, dataEp);
    copy_data(input, dataInput);

    auto C = make_shared<op::Parameter>(element::i32, maxpool_shape);
    auto df = autodiff::backprop_function(f);
    auto external = manager->compile(df);
    auto cf = backend->make_call_frame(external);
    cf->tensor_call({input, ep}, {output});
    ASSERT_TRUE(output->get_vector<int>() == expected);
}
TEST(${BACKEND_NAME}, backwards_abs)
{
    auto manager = runtime::Manager::get("${BACKEND_NAME}");
......