Commit bb5c7f07 authored by Nishant Patel, committed by Scott Cyphers

Support 3-D pool with mkldnn (#1079)

* Support 3-D pool with mkldnn

* Move execute() to test_tools.hpp
parent faad7d1b
@@ -2958,15 +2958,14 @@ namespace ngraph
auto max_pool = static_cast<const ngraph::op::MaxPool*>(node);
auto arg_shape = args[0].get_shape();
auto arg_rank = arg_shape.size();
auto result_shape = out[0].get_shape();
// TODO(jmenon): Optimize for 1D
// TODO(jmenon): Remove element type restriction
-if (arg_rank == 4 && max_pool->get_window_shape().size() == 2 &&
-    args[0].get_element_type() == element::f32)
+if (runtime::cpu::mkldnn_utils::use_mkldnn_kernel(node))
{
auto& mkldnn_emitter = external_function->get_mkldnn_emitter();
auto input_desc = mkldnn_emitter->build_memory_descriptor(
......
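The old emitter re-derived the dispatch condition (rank 4, 2-D window, f32) inline; the new code instead asks a shared helper whether an earlier compilation pass already marked the node for mkldnn. A minimal sketch of how such a predicate could work, assuming annotation types along the lines of ngraph's CPU op annotations (the class and accessor names here are assumptions, not verified source):

    // Sketch only: report whether a prior pass tagged this op to run on mkldnn.
    bool use_mkldnn_kernel(const ngraph::Node* node)
    {
        auto op = dynamic_cast<const ngraph::op::Op*>(node);
        if (op == nullptr || op->get_op_annotations() == nullptr)
        {
            return false; // ops without annotations fall back to the generic kernel
        }
        auto annotations = std::dynamic_pointer_cast<ngraph::runtime::cpu::CPUOpAnnotations>(
            op->get_op_annotations());
        return annotations != nullptr && annotations->is_mkldnn_op();
    }

Centralizing the decision this way means extending mkldnn coverage (as the assignment hunks below do for rank-5 pooling) touches one pass rather than every emitter.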
@@ -300,7 +300,8 @@ namespace ngraph
auto arg0_rank = arg0_shape.size();
auto result_shape = node->get_output_shape(0);
-if (arg0_rank == 4 && avg_pool->get_window_shape().size() == 2 &&
+if (((arg0_rank == 4 && avg_pool->get_window_shape().size() == 2) ||
+    (arg0_rank == 5 && avg_pool->get_window_shape().size() == 3)) &&
node->get_input_element_type(0) == element::f32)
{
auto op_annotations =
@@ -319,7 +320,8 @@ namespace ngraph
auto arg0_rank = arg0_shape.size();
auto result_shape = node->get_output_shape(0);
-if (arg0_rank == 4 && avg_pool->get_window_shape().size() == 2 &&
+if (((arg0_rank == 4 && avg_pool->get_window_shape().size() == 2) ||
+    (arg0_rank == 5 && avg_pool->get_window_shape().size() == 3)) &&
node->get_input_element_type(0) == element::f32)
{
auto op_annotations =
@@ -338,7 +340,8 @@ namespace ngraph
auto arg0_rank = arg0_shape.size();
auto result_shape = node->get_output_shape(0);
-if (arg0_rank == 4 && max_pool->get_window_shape().size() == 2 &&
+if (((arg0_rank == 4 && max_pool->get_window_shape().size() == 2) ||
+    (arg0_rank == 5 && max_pool->get_window_shape().size() == 3)) &&
node->get_input_element_type(0) == element::f32)
{
auto op_annotations =
@@ -376,7 +379,8 @@ namespace ngraph
auto arg1_rank = arg1_shape.size();
auto result_shape = node->get_output_shape(0);
-if (arg1_rank == 4 && max_pool->get_window_shape().size() == 2 &&
+if (((arg1_rank == 4 && max_pool->get_window_shape().size() == 2) ||
+    (arg1_rank == 5 && max_pool->get_window_shape().size() == 3)) &&
node->get_input_element_type(1) == element::f32)
{
auto op_annotations =
......
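All four hunks above, which appear to cover the forward and backprop variants of AvgPool and MaxPool, repeat the same widened predicate. Factored out, the condition this commit introduces is equivalent to (helper name hypothetical, not part of the commit):

    // Hypothetical helper capturing the repeated condition: mkldnn pooling is
    // used for 2-D windows over rank-4 (NCHW) tensors and, new in this commit,
    // 3-D windows over rank-5 (NCDHW) tensors, float32 only.
    static bool mkldnn_poolable(size_t rank, size_t window_dims, const ngraph::element::Type& et)
    {
        return ((rank == 4 && window_dims == 2) || (rank == 5 && window_dims == 3)) &&
               et == ngraph::element::f32;
    }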
@@ -30,6 +30,7 @@
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/random.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
@@ -4462,6 +4463,39 @@ NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_1channel_1image_strided)
read_vector<float>(result));
}
+
+NGRAPH_TEST(${BACKEND_NAME}, max_pool_3d)
+{
+    Shape shape_a{64, 3, 7, 8, 10};
+    Shape window_shape{2, 3, 2};
+    auto move_strides = Strides{2, 3, 4};
+    Shape padding_below{5, 6, 4};
+    Shape padding_above{6, 4, 5};
+    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto B = make_shared<op::Parameter>(element::f32, shape_a);
+
+    auto cpu_f = make_shared<Function>(
+        make_shared<op::MaxPool>(A, window_shape, move_strides, padding_below, padding_above),
+        op::ParameterVector{A});
+    auto int_f = make_shared<Function>(
+        make_shared<op::MaxPool>(B, window_shape, move_strides, padding_below, padding_above),
+        op::ParameterVector{B});
+
+    test::Uniform<float> rng(0.0f, 1.0f);
+    vector<vector<float>> args;
+    for (shared_ptr<op::Parameter> param : int_f->get_parameters())
+    {
+        vector<float> tensor_val(shape_size(param->get_shape()));
+        rng.initialize(tensor_val);
+        args.push_back(tensor_val);
+    }
+
+    auto int_results = execute(int_f, args, "INTERPRETER");
+    auto cpu_results = execute(cpu_f, args, "CPU");
+    for (size_t i = 0; i < cpu_results.size(); i++)
+    {
+        EXPECT_TRUE(test::all_close(cpu_results.at(i), int_results.at(i), 1.0e-4f, 1.0e-4f));
+    }
+}
NGRAPH_TEST(${BACKEND_NAME}, not)
{
Shape shape{2, 2};
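For reference, the expected output dims of the max_pool_3d test above follow from the usual pooling shape arithmetic (floor division assumed; this calculation is illustrative, not part of the commit):

    // out = (in + pad_below + pad_above - window) / stride + 1
    // depth:  (7  + 5 + 6 - 2) / 2 + 1 = 9
    // height: (8  + 6 + 4 - 3) / 3 + 1 = 6
    // width:  (10 + 4 + 5 - 2) / 4 + 1 = 5
    // => output shape {64, 3, 9, 6, 5}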
@@ -6254,6 +6288,39 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_2channel_2image_padded_3x3_strided_uneven)
read_vector<float>(result)));
}
+
+NGRAPH_TEST(${BACKEND_NAME}, avg_pool_3d)
+{
+    Shape shape_a{64, 3, 7, 8, 10};
+    Shape window_shape{2, 3, 2};
+    auto move_strides = Strides{2, 3, 4};
+    Shape padding_below{5, 6, 4};
+    Shape padding_above{6, 4, 5};
+    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto B = make_shared<op::Parameter>(element::f32, shape_a);
+
+    auto cpu_f = make_shared<Function>(
+        make_shared<op::AvgPool>(A, window_shape, move_strides, padding_below, padding_above, true),
+        op::ParameterVector{A});
+    auto int_f = make_shared<Function>(
+        make_shared<op::AvgPool>(B, window_shape, move_strides, padding_below, padding_above, true),
+        op::ParameterVector{B});
+
+    test::Uniform<float> rng(0.0f, 1.0f);
+    vector<vector<float>> args;
+    for (shared_ptr<op::Parameter> param : int_f->get_parameters())
+    {
+        vector<float> tensor_val(shape_size(param->get_shape()));
+        rng.initialize(tensor_val);
+        args.push_back(tensor_val);
+    }
+
+    auto int_results = execute(int_f, args, "INTERPRETER");
+    auto cpu_results = execute(cpu_f, args, "CPU");
+    for (size_t i = 0; i < cpu_results.size(); i++)
+    {
+        EXPECT_TRUE(test::all_close(cpu_results.at(i), int_results.at(i), 1.0e-4f, 1.0e-4f));
+    }
+}
NGRAPH_TEST(${BACKEND_NAME}, pad_interior_1d)
{
Shape shape_a{6};
......
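The avg_pool_3d test mirrors max_pool_3d except for the trailing true passed to op::AvgPool, which sets include_padding_in_avg_computation (that parameter name is my reading of ngraph's AvgPool constructor; treat it as an assumption). A minimal sketch of the two variants:

    // true:  padded (zero) positions count toward each window's averaging divisor
    // false: only in-bounds elements inside the window are averaged
    auto incl = make_shared<op::AvgPool>(
        A, window_shape, move_strides, padding_below, padding_above, true);
    auto excl = make_shared<op::AvgPool>(
        A, window_shape, move_strides, padding_below, padding_above, false);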
@@ -65,9 +65,8 @@
#include "util/autodiff/numeric_compare.hpp"
#include "util/matcher.hpp"
#include "util/random.hpp"
#include "util/test_tools.hpp"
#include "util/random.hpp"
#include "util/test_tools.hpp"
using namespace ngraph;
using namespace std;
@@ -881,46 +880,6 @@ TEST(cpu_fusion, fuse_conv_relu)
ASSERT_GT(cb, 0);
}
-template <typename T>
-static std::vector<std::vector<T>>
-    execute(std::shared_ptr<Function> f, std::vector<std::vector<T>> args, std::string cbackend)
-{
-    auto backend = runtime::Backend::create(cbackend);
-
-    auto parms = f->get_parameters();
-
-    if (parms.size() != args.size())
-    {
-        throw ngraph_error("number of parameters and arguments don't match");
-    }
-
-    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> arg_tensors(args.size());
-    for (size_t i = 0; i < args.size(); i++)
-    {
-        auto t = backend->create_tensor(parms.at(i)->get_element_type(), parms.at(i)->get_shape());
-        copy_data(t, args.at(i));
-        arg_tensors.at(i) = t;
-    }
-
-    auto results = f->get_results();
-    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> result_tensors(results.size());
-    for (size_t i = 0; i < results.size(); i++)
-    {
-        result_tensors.at(i) =
-            backend->create_tensor(results.at(i)->get_element_type(), results.at(i)->get_shape());
-    }
-
-    backend->call(f, result_tensors, arg_tensors);
-
-    std::vector<std::vector<T>> result_vectors;
-    for (auto rt : result_tensors)
-    {
-        result_vectors.push_back(read_vector<T>(rt));
-    }
-    return result_vectors;
-}
TEST(cpu_fusion, conv_relu_n2c1h2w2_2)
{
Shape shape_a{2, 1, 6, 6};
......
@@ -23,6 +23,7 @@
#include "ngraph/descriptor/layout/tensor_view_layout.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/serializer.hpp"
@@ -93,3 +94,44 @@ size_t count_ops_of_type(std::shared_ptr<ngraph::Function> f)
return count;
}
+
+template <typename T>
+std::vector<std::vector<T>> execute(std::shared_ptr<ngraph::Function> f,
+                                    std::vector<std::vector<T>> args,
+                                    std::string cbackend)
+{
+    auto backend = ngraph::runtime::Backend::create(cbackend);
+
+    auto parms = f->get_parameters();
+
+    if (parms.size() != args.size())
+    {
+        throw ngraph::ngraph_error("number of parameters and arguments don't match");
+    }
+
+    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> arg_tensors(args.size());
+    for (size_t i = 0; i < args.size(); i++)
+    {
+        auto t = backend->create_tensor(parms.at(i)->get_element_type(), parms.at(i)->get_shape());
+        copy_data(t, args.at(i));
+        arg_tensors.at(i) = t;
+    }
+
+    auto results = f->get_results();
+    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> result_tensors(results.size());
+    for (size_t i = 0; i < results.size(); i++)
+    {
+        result_tensors.at(i) =
+            backend->create_tensor(results.at(i)->get_element_type(), results.at(i)->get_shape());
+    }
+
+    backend->call(f, result_tensors, arg_tensors);
+
+    std::vector<std::vector<T>> result_vectors;
+    for (auto rt : result_tensors)
+    {
+        result_vectors.push_back(read_vector<T>(rt));
+    }
+    return result_vectors;
+}
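With execute() now shared from test_tools.hpp, any test can compare a backend against the INTERPRETER reference without redefining the helper. A minimal usage sketch (the Abs graph is illustrative only, not from the commit):

    // Build the same trivial graph once per backend (backends may transform a
    // function during compilation, which is why the pooling tests above also
    // construct one function per backend rather than sharing a single one).
    auto make_f = []() {
        auto A = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32,
                                                         ngraph::Shape{2, 2});
        return std::make_shared<ngraph::Function>(std::make_shared<ngraph::op::Abs>(A),
                                                  ngraph::op::ParameterVector{A});
    };
    std::vector<std::vector<float>> args{{-1.0f, 2.0f, -3.0f, 4.0f}};
    auto int_results = execute(make_f(), args, "INTERPRETER"); // reference result
    auto cpu_results = execute(make_f(), args, "CPU");         // backend under test
    // Both should yield {{1, 2, 3, 4}}.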