Unverified commit 7a7e27d7 authored by Adam Procter, committed by GitHub

CPU backprop tests (#456)

* Enable CPU backprop tests

* Fix to dot codegen for cases where n_reduction_axes != 1
parent 9ebe7a0c
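
For context on the second bullet: nGraph's `Dot` op generalizes matrix multiplication by contracting the last k axes of its first argument against the first k axes of its second, where k is `get_reduction_axes_count()`. The emitter's fast paths below (scalar product, matrix-vector product, SGEMM) are only valid when exactly one axis is reduced, which is what this fix enforces. The sketch below is a minimal reference implementation of that contraction for row-major data, written for this note rather than taken from the PR; `generic_dot` is a hypothetical name.

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Contract the last k axes of a (shape a_shape) with the first k axes of
// b (shape b_shape). In row-major layout this flattens to a plain
// (rows x reduce) * (reduce x cols) matrix product. With k == 1 on two
// matrices this is an ordinary matmul; with k == 2 it reduces both axes
// and yields a scalar, which is why the SGEMM fast path must not fire.
std::vector<float> generic_dot(const std::vector<float>& a,
                               const std::vector<std::size_t>& a_shape,
                               const std::vector<float>& b,
                               const std::vector<std::size_t>& b_shape,
                               std::size_t k) // number of reduction axes
{
    std::size_t reduce = 1; // total extent of the contracted axes
    for (std::size_t i = 0; i < k; i++)
    {
        assert(a_shape[a_shape.size() - k + i] == b_shape[i]);
        reduce *= b_shape[i];
    }
    std::size_t rows = a.size() / reduce; // non-reduced extent of a
    std::size_t cols = b.size() / reduce; // non-reduced extent of b

    std::vector<float> out(rows * cols, 0.0f);
    for (std::size_t i = 0; i < rows; i++)
        for (std::size_t j = 0; j < cols; j++)
            for (std::size_t r = 0; r < reduce; r++)
                out[i * cols + j] += a[i * reduce + r] * b[r * cols + j];
    return out;
}
```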
@@ -192,6 +192,8 @@ void runtime::cpu::CPU_Emitter::EmitDot(codegen::CodeWriter& writer,
                                         const vector<runtime::cpu::TensorViewWrapper>& args,
                                         const vector<runtime::cpu::TensorViewWrapper>& out)
 {
+    const ngraph::op::Dot* dot = static_cast<const ngraph::op::Dot*>(n);
+
     const Shape& arg0_shape = args[0].get_shape();
     const Shape& arg1_shape = args[1].get_shape();
     if (arg0_shape.empty() || arg1_shape.empty())
@@ -206,7 +208,8 @@ void runtime::cpu::CPU_Emitter::EmitDot(codegen::CodeWriter& writer,
         writer.indent--;
         writer << "}\n";
     }
-    else if ((arg0_shape.size() == 1) && (arg1_shape.size() == 1))
+    else if ((arg0_shape.size() == 1) && (arg1_shape.size() == 1) &&
+             dot->get_reduction_axes_count() == 1)
     {
         writer << "{ // " << n->get_name() << "\n";
         writer.indent++;
@@ -215,7 +218,8 @@ void runtime::cpu::CPU_Emitter::EmitDot(codegen::CodeWriter& writer,
         writer.indent--;
         writer << "}\n";
     }
-    else if ((arg0_shape.size() == 2) && (arg1_shape.size() == 1))
+    else if ((arg0_shape.size() == 2) && (arg1_shape.size() == 1) &&
+             dot->get_reduction_axes_count() == 1)
     {
         writer << "{ // " << n->get_name() << "\n";
         writer.indent++;
@@ -224,7 +228,8 @@ void runtime::cpu::CPU_Emitter::EmitDot(codegen::CodeWriter& writer,
         writer.indent--;
         writer << "}\n";
     }
-    else if ((arg0_shape.size() == 2) && (arg1_shape.size() == 2))
+    else if ((arg0_shape.size() == 2) && (arg1_shape.size() == 2) &&
+             dot->get_reduction_axes_count() == 1)
     {
         // Emit an MKL SGEMM call if possible
         // clang-format off
@@ -255,8 +260,6 @@ void runtime::cpu::CPU_Emitter::EmitDot(codegen::CodeWriter& writer,
     }
     else
     {
-        const ngraph::op::Dot* dot = static_cast<const ngraph::op::Dot*>(n);
-
         writer << "kernel::dot(" << args[0].get_name() << ",\n";
         writer << "            " << args[1].get_name() << ",\n";
         writer << "            " << out[0].get_name() << ",\n";
...
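
To make the pre-fix failure mode concrete: a `Dot` of two 2-D tensors with `reduction_axes_count == 2` is a full contraction producing a scalar, but the old dispatch matched on rank alone and could route such a case to a fast path meant for k == 1. A hand-worked example of the expected semantics (illustrative values, not a test from this PR):

```cpp
// Dot(A, B) with two reduction axes over 2x2 inputs contracts both
// axes: sum over i,j of A[i][j] * B[i][j], yielding a single scalar.
float a[2][2] = {{1, 2}, {3, 4}};
float b[2][2] = {{5, 6}, {7, 8}};
float expected = 1 * 5 + 2 * 6 + 3 * 7 + 4 * 8; // 70.0f, not a 2x2 matrix
```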
@@ -99,15 +99,23 @@ foreach(BACKEND_NAME ${BACKEND_NAMES})
     set(SRC ${SRC} ${CMAKE_CURRENT_BINARY_DIR}/backend_test_${BACKEND_NAME}.cpp)
     set(SRC ${SRC} ${CMAKE_CURRENT_BINARY_DIR}/convolution_test_${BACKEND_NAME}.cpp)
+
+    # Some---but not all---autodiff tests go through multiple iterations with
+    # different random seeds. On the CPU backend this is currently very slow
+    # because the autodiff tests recompile with each iteration. That behavior
+    # can be changed, but it's a bit involved, so for the time being we just
+    # reduce the number of test iterations on non-INTERPRETER backends.
+    if(${BACKEND_NAME} MATCHES ^INTERPRETER$)
+        set(TEST_LOOPS 100)
+    else()
+        set(TEST_LOOPS 2)
+    endif()
+
+    configure_file(autodiff.in.cpp autodiff_${BACKEND_NAME}.cpp)
+    set(SRC ${SRC} ${CMAKE_CURRENT_BINARY_DIR}/autodiff_${BACKEND_NAME}.cpp)
+
     message(STATUS "Adding unit test for backend ${BACKEND_NAME}")
 endforeach()

-# <special case>
-# This is a special case as the autodiff test recompile the graph multiple times per
-# test which is slow on the CPU
-set(BACKEND_NAME "INTERPRETER")
-configure_file(autodiff.in.cpp autodiff_${BACKEND_NAME}.cpp)
-set(SRC ${SRC} ${CMAKE_CURRENT_BINARY_DIR}/autodiff_${BACKEND_NAME}.cpp)
-# </special cast>
-
 include_directories(".")
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
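
The `${TEST_LOOPS}` and `${BACKEND_NAME}` tokens in the test hunks below are not C++; `autodiff.in.cpp` is a template that `configure_file()` expands once per backend at configure time (by default `configure_file` substitutes `${VAR}` references). A sketch of what the generated sources look like; the test name `backwards_abs` is taken from the diff, the surrounding elisions are mine:

```cpp
// Template line in autodiff.in.cpp:
//     TEST(${BACKEND_NAME}, backwards_abs) { for (auto i = 0; i < ${TEST_LOOPS}; i++) ... }
//
// Generated autodiff_INTERPRETER.cpp (TEST_LOOPS = 100):
//     TEST(INTERPRETER, backwards_abs) { for (auto i = 0; i < 100; i++) ... }
//
// Generated autodiff_CPU.cpp (TEST_LOOPS = 2):
//     TEST(CPU, backwards_abs) { for (auto i = 0; i < 2; i++) ... }
```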
@@ -321,7 +321,7 @@ TEST(${BACKEND_NAME}, backwards_avgpool_n2_c2_hw4x4_numeric)
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x = rng.initialize(backend->make_primary_tensor_view(element::f32, shape_a));
         EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x}, .01f, .01f));

@@ -344,7 +344,7 @@ TEST(${BACKEND_NAME}, backwards_avgpool_n2_c2_hw4x4_win_2x2_str_1x1_numeric)
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x = rng.initialize(backend->make_primary_tensor_view(element::f32, shape_a));
         EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x}, .01f, .01f));

@@ -369,7 +369,7 @@ TEST(${BACKEND_NAME}, backwards_avgpool_n2_c2_hw2x2_win_2x2_str_1x1_padding_numeric)
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x = rng.initialize(backend->make_primary_tensor_view(element::f32, shape_a));
         EXPECT_TRUE(autodiff_numeric_compare<float>(manager, backend, make_graph, {x}, .01f, .01f));

@@ -393,7 +393,7 @@ TEST(${BACKEND_NAME}, backwards_abs)
                                    std::vector<std::shared_ptr<op::Parameter>>{X});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x_neg = rng_neg.initialize(backend->make_primary_tensor_view<float>(shape));

@@ -570,7 +570,7 @@ TEST(${BACKEND_NAME}, backwards_ceiling)
                                    std::vector<std::shared_ptr<op::Parameter>>{X});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x_minusone = rng_minusone.initialize(backend->make_primary_tensor_view<float>(shape));

@@ -602,7 +602,7 @@ TEST(${BACKEND_NAME}, backwards_cos)
                                    std::vector<std::shared_ptr<op::Parameter>>{X});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x = rng.initialize(backend->make_primary_tensor_view<float>(shape));

@@ -623,7 +623,7 @@ TEST(${BACKEND_NAME}, backwards_cosh)
                                    std::vector<std::shared_ptr<op::Parameter>>{X});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x = rng.initialize(backend->make_primary_tensor_view<float>(shape));

@@ -837,7 +837,7 @@ TEST(${BACKEND_NAME}, backwards_floor)
                                    std::vector<std::shared_ptr<op::Parameter>>{X});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x_minusone = rng_minusone.initialize(backend->make_primary_tensor_view<float>(shape));

@@ -1014,7 +1014,7 @@ TEST(${BACKEND_NAME}, backwards_replace_slice)
                                    std::vector<std::shared_ptr<op::Parameter>>{X, Y});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x = rng.initialize(backend->make_primary_tensor_view<float>(shape_x));
         auto y = rng.initialize(backend->make_primary_tensor_view<float>(shape_y));

@@ -1056,7 +1056,7 @@ TEST(${BACKEND_NAME}, backwards_select)
                                    std::vector<std::shared_ptr<op::Parameter>>{X0, X1, X2});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x0 = backend->make_primary_tensor_view(element::boolean, shape);
         write_vector(x0, vector<char>{0, 1, 0, 1, 0, 1});

@@ -1089,7 +1089,7 @@ TEST(${BACKEND_NAME}, backwards_select_nested)
                                    std::vector<std::shared_ptr<op::Parameter>>{X0, X1, X2});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x0 = backend->make_primary_tensor_view(element::boolean, shape);
         write_vector(x0, vector<char>{0, 1, 0, 1, 0, 1});

@@ -1124,7 +1124,7 @@ TEST(${BACKEND_NAME}, backwards_sign)
                                    std::vector<std::shared_ptr<op::Parameter>>{X});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x_neg = rng_neg.initialize(backend->make_primary_tensor_view<float>(shape));

@@ -1151,7 +1151,7 @@ TEST(${BACKEND_NAME}, backwards_sin)
                                    std::vector<std::shared_ptr<op::Parameter>>{X});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x = rng.initialize(backend->make_primary_tensor_view<float>(shape));

@@ -1172,7 +1172,7 @@ TEST(${BACKEND_NAME}, backwards_sinh)
                                    std::vector<std::shared_ptr<op::Parameter>>{X});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x = rng.initialize(backend->make_primary_tensor_view<float>(shape));

@@ -1193,7 +1193,7 @@ TEST(${BACKEND_NAME}, backwards_slice)
                                    std::vector<std::shared_ptr<op::Parameter>>{X});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x = rng.initialize(backend->make_primary_tensor_view<float>(shape));

@@ -1215,7 +1215,7 @@ TEST(${BACKEND_NAME}, backwards_sqrt)
                                    std::vector<std::shared_ptr<op::Parameter>>{X});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x = rng.initialize(backend->make_primary_tensor_view<float>(shape));

@@ -1330,7 +1330,7 @@ TEST(${BACKEND_NAME}, backwards_tan)
                                    std::vector<std::shared_ptr<op::Parameter>>{X});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x_r = rng_r.initialize(backend->make_primary_tensor_view<float>(shape));

@@ -1357,7 +1357,7 @@ TEST(${BACKEND_NAME}, backwards_tanh)
                                    std::vector<std::shared_ptr<op::Parameter>>{X});
     };

-    for (auto i = 0; i < 100; i++)
+    for (auto i = 0; i < ${TEST_LOOPS}; i++)
     {
         auto x = rng.initialize(backend->make_primary_tensor_view<float>(shape));
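
`autodiff_numeric_compare` itself is defined in the test utilities, not in this diff. The idea it implements is standard gradient checking: compare the backend's backprop gradient against a finite-difference estimate within relative/absolute tolerances (the two `.01f` arguments). A minimal self-contained sketch of that check, with hypothetical names and a central-difference scheme assumed for illustration:

```cpp
#include <cmath>
#include <cstddef>
#include <functional>
#include <vector>

// Central-difference gradient check: perturb each input coordinate by
// +/-h, estimate df/dx_i numerically, and compare with the gradient the
// backend's autodiff produced, within atol + rtol * |numeric|.
bool numeric_gradient_close(const std::function<float(const std::vector<float>&)>& f,
                            std::vector<float> x,
                            const std::vector<float>& autodiff_grad,
                            float rtol,
                            float atol)
{
    const float h = 1e-3f;
    for (std::size_t i = 0; i < x.size(); i++)
    {
        const float saved = x[i];
        x[i] = saved + h;
        const float fp = f(x);
        x[i] = saved - h;
        const float fm = f(x);
        x[i] = saved;
        const float numeric = (fp - fm) / (2 * h); // central difference
        if (std::fabs(numeric - autodiff_grad[i]) > atol + rtol * std::fabs(numeric))
        {
            return false;
        }
    }
    return true;
}
```

With `TEST_LOOPS` reduced to 2 on non-INTERPRETER backends, each of these tests still exercises the check on fresh random inputs, just with far fewer recompilations on the CPU backend.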