Commit c6672b3d authored by pthoreho

style fix

parent 03f6c0ab
@@ -2549,19 +2549,20 @@ void runtime::cpu::CPU_Emitter::EMITTER_DECL(EmitMaxPoolBackprop)
     //----------------------------------------------------------------------------------------------
     // create a forward primitive_desc, use this to query the workspace
     // FIXME: (pruthvi) this is a workaround, till we maintain a global context to refer to the corrosponding
     // MKLDNN kernel.
-    writer << "memory::desc max_pool_input_desc = memory::desc({" << join(args[0].get_shape()) << "}, " << et
-           << ", memory::format::nchw);\n";
-    writer << "memory::desc max_pool_result_desc = memory::desc({" << join(args[1].get_shape()) << "}, " << et
-           << ", memory::format::nchw);\n";
-    writer << "memory maxpool_input_data = memory({max_pool_input_desc, cpu_engine}, " << args[0].get_name()
-           << ");\n";
-    writer << "memory maxpool_result = memory({max_pool_result_desc, cpu_engine}, " << out[0].get_name()
-           << ");\n";
+    writer << "memory::desc max_pool_input_desc = memory::desc({" << join(args[0].get_shape())
+           << "}, " << et << ", memory::format::nchw);\n";
+    writer << "memory::desc max_pool_result_desc = memory::desc({" << join(args[1].get_shape())
+           << "}, " << et << ", memory::format::nchw);\n";
+    writer << "memory maxpool_input_data = memory({max_pool_input_desc, cpu_engine}, "
+           << args[0].get_name() << ");\n";
+    writer << "memory maxpool_result = memory({max_pool_result_desc, cpu_engine}, "
+           << out[0].get_name() << ");\n";
     writer << "pooling_forward::primitive_desc pool_fwd_pd = pooling_forward::primitive_desc("
            << "{prop_kind::forward, algorithm::pooling_max, "
-           << "max_pool_input_desc, max_pool_result_desc, {" << join(max_pool_fprop_op->get_window_movement_strides())
-           << "}, {" << join(max_pool_fprop_op->get_window_shape()) << "}, "
+           << "max_pool_input_desc, max_pool_result_desc, {"
+           << join(max_pool_fprop_op->get_window_movement_strides()) << "}, {"
+           << join(max_pool_fprop_op->get_window_shape()) << "}, "
            << "{" << join(max_pool_fprop_op->get_padding_below()) << "}, "
            << "{" << join(max_pool_fprop_op->get_padding_above()) << "}, "
            << "padding_kind::zero}, cpu_engine);\n";
...
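For orientation (the hunk above changes only line wrapping, not behavior): these writer calls emit C++ text into the generated kernel source. Assuming the shapes from the first test below ({1, 4, 4, 4} input, {4, 1, 3, 3} output, 2x2 window, 1x1 strides, zero padding), and assuming et renders as memory::data_type::f32, the emitted fragment would read roughly as in this sketch, with arg0_ptr and out0_ptr standing in for whatever args[0].get_name() and out[0].get_name() return:

    // Sketch of the emitter's output, not part of the diff; names and shapes are illustrative.
    memory::desc max_pool_input_desc = memory::desc({1, 4, 4, 4}, memory::data_type::f32, memory::format::nchw);
    memory::desc max_pool_result_desc = memory::desc({4, 1, 3, 3}, memory::data_type::f32, memory::format::nchw);
    memory maxpool_input_data = memory({max_pool_input_desc, cpu_engine}, arg0_ptr); // arg0_ptr: input buffer
    memory maxpool_result = memory({max_pool_result_desc, cpu_engine}, out0_ptr);    // out0_ptr: output buffer
    pooling_forward::primitive_desc pool_fwd_pd = pooling_forward::primitive_desc(
        {prop_kind::forward, algorithm::pooling_max, max_pool_input_desc, max_pool_result_desc,
         {1, 1}, {2, 2}, {0, 0}, {0, 0}, padding_kind::zero},
        cpu_engine);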
@@ -40,12 +40,12 @@ namespace ngraph
         const std::unordered_set<std::type_index> s_op_registry{
             TI(ngraph::op::AvgPool),
             TI(ngraph::op::AvgPoolBackprop),
+            TI(ngraph::op::BatchNorm),
             TI(ngraph::op::Convolution),
             TI(ngraph::op::ConvolutionBackpropData),
             TI(ngraph::op::ConvolutionBackpropFilters),
             TI(ngraph::op::MaxPool),
-            TI(ngraph::op::MaxPoolBackprop),
-            TI(ngraph::op::BatchNorm)};
+            TI(ngraph::op::MaxPoolBackprop)};

         bool IsMKLDNNOp(ngraph::Node& op)
         {
...
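The only change in this hunk is ordering: TI(ngraph::op::BatchNorm) moves into its alphabetical slot in s_op_registry. For context, IsMKLDNNOp presumably answers membership by the node's dynamic type; a minimal sketch, assuming TI(T) expands to std::type_index(typeid(T)):

    // Sketch, not part of the diff: registry lookup keyed on the node's concrete type.
    bool IsMKLDNNOp(ngraph::Node& op)
    {
        return s_op_registry.count(std::type_index(typeid(op))) != 0;
    }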
@@ -1410,7 +1410,6 @@ TEST(${BACKEND_NAME}, backwards_maxpool_n4c1h4w4_kh2kw2_sh1sw1)
 {
     auto manager = runtime::Manager::get("${BACKEND_NAME}");
     auto backend = manager->allocate_backend();
-    pass::Manager pass_manager;

     Shape shape_a{1, 4, 4, 4}; //in CHWN
     Shape maxpool_shape{4, 1, 3, 3};
@@ -1431,21 +1430,20 @@ TEST(${BACKEND_NAME}, backwards_maxpool_n4c1h4w4_kh2kw2_sh1sw1)
         backend->make_primary_tensor_view(element::f32, shape_a);

     vector<float> dataInput{11, 65, 44, 28, 31, 33, 21, 66, 40, 49, 69, 57, 47, 30, 24, 27,
                             13, 56, 46, 60, 61, 41, 25, 42, 48, 53, 51, 43, 59, 58, 29, 71,
                             17, 22, 72, 18, 39, 35, 15, 38, 64, 52, 73, 67, 62, 50, 10, 68,
                             45, 63, 16, 14, 55, 54, 37, 20, 36, 12, 70, 34, 19, 26, 32, 23};

     vector<float> expected{//delta
                            0, 4, 0, 0, 0, 0, 0, 8, 0, 0, 8, 0, 0, 0, 0, 0, 0, 4, 4, 4, 12, 0,
                            0, 0, 0, 8, 0, 0, 4, 8, 0, 8, 0, 0, 8, 0, 0, 0, 0, 4, 16, 4, 16, 8,
                            0, 0, 0, 4, 0, 4, 0, 0, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

     copy_data(ep, dataEp);
     copy_data(input, dataInput);

     auto C = make_shared<op::Parameter>(element::f32, maxpool_shape);
     auto df = autodiff::backprop_function(f);
-    pass_manager.run_passes(df);
     auto external = manager->compile(df);
     auto cf = backend->make_call_frame(external);
     cf->tensor_call({input, ep}, {output});
@@ -1479,12 +1477,13 @@ TEST(${BACKEND_NAME}, backwards_maxpool_n2c1h5w5_kh3kw3_sh2sw2)
         backend->make_primary_tensor_view(element::f32, shape_a);

     vector<float> dataInput{58, 15, 51, 35, 18, 47, 31, 32, 52, 21, 36, 38, 57, 54, 25, 45, 23,
                             30, 16, 27, 48, 20, 41, 37, 43, 39, 22, 28, 33, 29, 12, 17, 44, 42,
                             19, 40, 10, 46, 34, 53, 26, 55, 50, 13, 24, 14, 49, 56, 59, 11};

     vector<float> expected{//delta
-                           4, 0, 0, 0, 0, 4, 0, 0, 4, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 4, 4, 0};
+                           4, 0, 0, 0, 0, 4, 0, 0, 4, 0, 0, 0, 0, 4, 0, 0, 0,
+                           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                           0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 4, 4, 0};

     copy_data(ep, dataEp);
     copy_data(input, dataInput);
...
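With the explicit pass::Manager removed from both tests, compilation is driven entirely by manager->compile(df); the assumption behind the change is that whatever passes the backprop function needs now run inside compile(). A condensed sketch of the resulting test flow, using the names from the hunks above:

    // Sketch of the test flow after this commit, not part of the diff.
    auto manager = runtime::Manager::get("${BACKEND_NAME}");
    auto backend = manager->allocate_backend();
    auto df = autodiff::backprop_function(f);     // f: the forward max-pool function
    auto external = manager->compile(df);         // no pass_manager.run_passes(df) beforehand
    auto cf = backend->make_call_frame(external);
    cf->tensor_call({input, ep}, {output});       // ep carries the incoming deltas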