Commit 81fe53cd authored by Louis Feng

test wip.

parent 9a1cbd9d
@@ -28,6 +28,7 @@
#include "ngraph/ops/relu.hpp"
#include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
#include "ngraph/runtime/cpu/cpu_op_annotations.hpp"
#include "ngraph/runtime/cpu/ops/conv_bias.hpp"
#include "ngraph/types/element_type.hpp"
#include "mkldnn_utils.hpp"
@@ -44,8 +45,10 @@ static const std::unordered_set<std::type_index> s_op_registry{
TI(ngraph::op::AvgPoolBackprop),
TI(ngraph::op::BatchNorm),
TI(ngraph::op::Convolution),
TI(ngraph::op::ConvolutionBias),
TI(ngraph::op::ConvolutionBackpropData),
TI(ngraph::op::ConvolutionBackpropFilters),
TI(ngraph::op::ConvolutionBiasBackpropFiltersBias),
TI(ngraph::op::MaxPool),
TI(ngraph::op::MaxPoolBackprop),
TI(ngraph::op::Relu),
......
@@ -31,6 +31,7 @@
#include "ngraph/ops/relu.hpp"
#include "ngraph/runtime/cpu/cpu_op_annotations.hpp"
#include "ngraph/runtime/cpu/mkldnn_utils.hpp"
#include "ngraph/runtime/cpu/ops/conv_bias.hpp"
using namespace std;
using namespace ngraph;
@@ -97,6 +98,33 @@ namespace ngraph
}
}
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionBias)
{
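// Assign ConvolutionBias to MKLDNN only when the data is not dilated,
// both data and weights are 4D, and the element type is f32; otherwise
// the op falls back to the default reference kernel.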
auto convolution = static_cast<op::ConvolutionBias*>(node);
auto arg0_shape = node->get_input_shape(0);
auto arg1_shape = node->get_input_shape(1);
auto result_shape = node->get_output_shape(0);
auto arg0_rank = arg0_shape.size();
auto arg1_rank = arg1_shape.size();
bool data_dilated = false;
for (size_t s : convolution->get_data_dilation_strides())
{
data_dilated = data_dilated || (s != 1);
}
if (!data_dilated && arg0_rank == 4 && arg1_rank == 4 &&
node->get_input_element_type(0) == element::f32)
{
auto op_annotations =
std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
op_annotations->set_mkldnn_op(true);
convolution->set_op_annotations(op_annotations);
}
}
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionBackpropData)
{
@@ -151,6 +179,34 @@ namespace ngraph
}
}
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::ConvolutionBiasBackpropFiltersBias)
{
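// Same MKLDNN eligibility check as the forward ConvolutionBias op, using
// the forward data dilation strides stored on the backprop node.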
auto convolution = static_cast<op::ConvolutionBiasBackpropFiltersBias*>(node);
auto arg0_shape = node->get_input_shape(0);
auto arg1_shape = node->get_input_shape(1);
auto filters_shape = node->get_output_shape(0);
auto bias_shape = node->get_output_shape(1);
auto arg0_rank = arg0_shape.size();
auto arg1_rank = arg1_shape.size();
bool data_dilated = false;
for (size_t s : convolution->get_data_dilation_strides_forward())
{
data_dilated = data_dilated || (s != 1);
}
if (!data_dilated && arg0_rank == 4 && arg1_rank == 4 &&
node->get_input_element_type(0) == element::f32)
{
auto op_annotations =
std::make_shared<ngraph::runtime::cpu::CPUOpAnnotations>();
op_annotations->set_mkldnn_op(true);
convolution->set_op_annotations(op_annotations);
}
}
template <>
void CPUAssignment::ASSIGN_DECL(ngraph::op::AvgPool)
{
@@ -235,6 +291,8 @@ static const runtime::cpu::pass::AssignOpMap s_dispatcher{
{TI(ngraph::op::Add), &runtime::cpu::pass::CPUAssignment::assign<ngraph::op::Add>},
{TI(ngraph::op::Convolution),
&runtime::cpu::pass::CPUAssignment::assign<ngraph::op::Convolution>},
{TI(ngraph::op::ConvolutionBias),
&runtime::cpu::pass::CPUAssignment::assign<ngraph::op::ConvolutionBias>},
{TI(ngraph::op::ConvolutionBackpropData),
&runtime::cpu::pass::CPUAssignment::assign<ngraph::op::ConvolutionBackpropData>},
{TI(ngraph::op::ConvolutionBackpropFilters),
......
@@ -324,3 +324,147 @@ TEST(cpu_fusion, fuse_conv_bias)
ASSERT_GT(cb, 0);
}
TEST(cpu_fusion, conv_bias_fprop)
{
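// Forward test: a 1x1x3x3 input convolved with a single 3x3 kernel of ones,
// no padding and unit strides, yields one output element to which
// ConvolutionBias adds a bias of 1.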
const int n = 1;
const int c = 1;
const int filter = 1;
const int kernel_size = 3;
const int w = 3;
const int h = w;
auto data_shape = Shape{n, c, h, w};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
auto weights_shape = Shape{filter, c, kernel_size, kernel_size};
auto weights = make_shared<op::Parameter>(element::f32, weights_shape);
auto bias_shape = Shape{filter};
auto bias = make_shared<op::Parameter>(element::f32, bias_shape);
auto convolution = make_shared<op::Convolution>(data, weights);
auto convolution_bias = make_shared<op::ConvolutionBias>(convolution, bias);
auto f = make_shared<Function>(convolution_bias, op::ParameterVector{data, weights, bias});
auto manager = runtime::Manager::get("CPU");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
// Create some tensors for input/output
auto _data = backend->make_primary_tensor_view(element::f32, data_shape);
copy_data(_data, vector<float>{-0.67765152, 0.10073948, 0.57595438,
-0.3469252, -0.22134334, -1.80471897,
-0.80642909, 1.22033095, 2.23235631});
auto _weights = backend->make_primary_tensor_view(element::f32, weights_shape);
copy_data(_weights, vector<float>{1, 1, 1, 1, 1, 1, 1, 1, 1});
auto _bias = backend->make_primary_tensor_view(element::f32, bias_shape);
copy_data(_bias, vector<float>{1});
auto result_shape = Shape{1, 1, 1, 1};
auto result = backend->make_primary_tensor_view(element::f32, result_shape);
// vector<float> expected_result{-0.71498716f,
// 1.48388731f,
// -0.00196938f,
// -0.76693159f,
// -0.91316032f,
// 0.23943391f,
// -0.84090298f,
// 1.51462936f};
cf->call({_data, _weights, _bias}, {result});
auto result_vec = read_vector<float>(result);
for (size_t i = 0; i < result_vec.size(); ++i) {
std::cout << result_vec[i] << " ";
}
std::cout << std::endl;
//EXPECT_TRUE(test::all_close(expected_result, read_vector<float>(result)));
}
TEST(cpu_fusion, conv_bias_bprop)
{
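// Backprop test: builds the same ConvolutionBias graph as above, then
// computes and prints the gradients w.r.t. data, weights, and bias for a
// single delta value (expected values are still TODO in this WIP test).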
const int n = 1;
const int c = 1;
const int filter = 1;
const int kernel_size = 3;
const int w = 3;
const int h = w;
auto data_shape = Shape{n, c, h, w};
auto data = make_shared<op::Parameter>(element::f32, data_shape);
auto weights_shape = Shape{filter, c, kernel_size, kernel_size};
auto weights = make_shared<op::Parameter>(element::f32, weights_shape);
auto bias_shape = Shape{filter};
auto bias = make_shared<op::Parameter>(element::f32, bias_shape);
auto convolution = make_shared<op::Convolution>(data, weights);
auto convolution_bias = make_shared<op::ConvolutionBias>(convolution, bias);
auto result_shape = Shape{1, 1, 1, 1};
auto f = make_shared<Function>(convolution_bias, op::ParameterVector{data, weights, bias});
auto manager = runtime::Manager::get("CPU");
auto backend = manager->allocate_backend();
auto delta = std::make_shared<op::Parameter>(element::f32, result_shape);
auto d_data = convolution_bias->backprop_node(data, delta);
auto d_weights = convolution_bias->backprop_node(weights, delta);
auto d_bias = convolution_bias->backprop_node(bias, delta);
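// backprop_node returns the adjoint of the ConvolutionBias output with
// respect to the given input, seeded by the delta parameter.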
auto df = make_shared<Function>(NodeVector{d_data, d_weights, d_bias},
op::ParameterVector{data, weights, bias, delta});
auto external = manager->compile(df);
auto cf = backend->make_call_frame(external);
// Create some tensors for input/output
auto _data = backend->make_primary_tensor_view(element::f32, data_shape);
copy_data(_data, vector<float>{-0.67765152, 0.10073948, 0.57595438,
-0.3469252, -0.22134334, -1.80471897,
-0.80642909, 1.22033095, 2.23235631});
auto _weights = backend->make_primary_tensor_view(element::f32, weights_shape);
copy_data(_weights, vector<float>{1, 1, 1, 1, 1, 1, 1, 1, 1});
auto _bias = backend->make_primary_tensor_view(element::f32, bias_shape);
copy_data(_bias, vector<float>{1});
auto _delta = backend->make_primary_tensor_view(element::f32, result_shape);
copy_data(_delta, vector<float>{1.27231});
// results
auto _d_data = backend->make_primary_tensor_view(element::f32, data_shape);
copy_data(_d_data, vector<float>{0, 0, 0, 0, 0, 0, 0, 0, 0});
auto _d_weights = backend->make_primary_tensor_view(element::f32, weights_shape);
copy_data(_d_weights, vector<float>{0, 0, 0, 0, 0, 0, 0, 0, 0});
auto _d_bias = backend->make_primary_tensor_view(element::f32, bias_shape);
copy_data(_d_bias, vector<float>{0});
// vector<float> expected_result{-0.71498716f,
// 1.48388731f,
// -0.00196938f,
// -0.76693159f,
// -0.91316032f,
// 0.23943391f,
// -0.84090298f,
// 1.51462936f};
cf->call({_data, _weights, _bias, _delta}, {_d_data, _d_weights, _d_bias});
auto result_vec = read_vector<float>(_d_data);
for (size_t i = 0; i < result_vec.size(); ++i) {
std::cout << result_vec[i] << " ";
}
result_vec = read_vector<float>(_d_weights);
for (size_t i = 0; i < result_vec.size(); ++i) {
std::cout << result_vec[i] << " ";
}
result_vec = read_vector<float>(_d_bias);
for (size_t i = 0; i < result_vec.size(); ++i) {
std::cout << result_vec[i] << " ";
}
std::cout << std::endl;
//EXPECT_TRUE(test::all_close(expected_result, read_vector<float>(result)));
}
\ No newline at end of file