Commit f7b13dc4 authored by Amy Zhuang, committed by Scott Cyphers

Move zero padded conv fusions from CPUFusion to CoreFusion. (#2969)

* Move zero padded conv fusions from CPUFusion to CoreFusion.

* Address PR feedback: move unit tests to core_fusion.
parent 49baa903
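For context, the rewrite these patterns perform: a Pad whose pad value is a literal zero constant, and which pads only the spatial axes, feeding a Convolution (or ConvolutionBackpropFilters) is folded away by moving the pad amounts into the convolution's own padding_below/padding_above arguments. A minimal sketch of the before/after graphs, using the same op constructors the moved tests exercise and assuming the tests' usual includes and using-directives (this shows the equivalence the fusion relies on, not the pattern-matcher code itself, which is in the collapsed implementation diffs):

// Before: an explicit zero Pad (1 below on W, 1 above on H) feeding an unpadded conv.
auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
auto zero = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});
auto pad =
    make_shared<op::Pad>(X, zero, CoordinateDiff{0, 0, 0, 1}, CoordinateDiff{0, 0, 1, 0});
auto conv = make_shared<op::Convolution>(pad,
                                         F,
                                         Strides{1, 1},
                                         Strides{1, 1},
                                         CoordinateDiff{0, 0},
                                         CoordinateDiff{0, 0},
                                         Strides{1, 1});

// After the fusion: the Pad node disappears and its spatial (H, W) amounts
// become the convolution's padding_below / padding_above.
auto fused = make_shared<op::Convolution>(X,
                                          F,
                                          Strides{1, 1},
                                          Strides{1, 1},
                                          CoordinateDiff{0, 1}, // below: H=0, W=1
                                          CoordinateDiff{1, 0}, // above: H=1, W=0
                                          Strides{1, 1});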
This diff is collapsed.
@@ -42,6 +42,9 @@ public:
         construct_optimized_strided_conv();
         construct_reshape_broadcast();
         construct_reshape_softmax_reshape();
+        construct_zero_padded_reshaped_conv();
+        construct_zero_padded_conv();
+        construct_zero_padded_conv_backprop_filters();
     }
     // Patterns under FOP_FUSIONS create ops (FusedOps) that might not
     // be all supported by certain backends. In such a case, backends
@@ -61,6 +64,9 @@ public:
     void construct_optimized_strided_conv();
     void construct_reshape_broadcast();
     void construct_reshape_softmax_reshape();
+    void construct_zero_padded_reshaped_conv();
+    void construct_zero_padded_conv();
+    void construct_zero_padded_conv_backprop_filters();
     void construct_conv_bias();
     void construct_conv_bias_add();
 };
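These three patterns rewrite Pad + Convolution into a plain Convolution rather than into a FusedOp, which is presumably why they can be registered unconditionally in the constructor instead of behind the FOP_FUSIONS guard described in the comment above. The moved tests trigger the pass indirectly through backend->compile(); running it directly through nGraph's pass manager looks roughly like this (standard pass-manager usage, assumed rather than taken from this commit):

ngraph::pass::Manager pass_manager;
pass_manager.register_pass<ngraph::pass::CoreFusion>();
pass_manager.run_passes(func); // func: a shared_ptr<Function> containing Pad -> Convolution
// Any zero Pad feeding a Convolution should now be folded into the conv's padding.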
This diff is collapsed.
@@ -53,9 +53,6 @@ public:
         construct_matmul();
         construct_matmulbias();
         construct_fprop_bn();
-        construct_zero_padded_reshaped_conv();
-        construct_zero_padded_conv();
-        construct_zero_padded_conv_backprop_filters();
         construct_conv_bias_bprop();
         construct_conv_bias_folded_batch_norm();
         construct_conv_bias_affine_folding();
@@ -90,9 +87,6 @@ private:
     void construct_conv_bias_bprop();
     void construct_fprop_bn();
     void construct_sigmoid_multiply();
-    void construct_zero_padded_reshaped_conv();
-    void construct_zero_padded_conv();
-    void construct_zero_padded_conv_backprop_filters();
     void construct_batch_norm_relu();
     void construct_batch_norm_relu_global_stats();
     void construct_conv_relu();
@@ -314,6 +314,121 @@ TEST(core_fusion, reshape_softmax_reshape)
     EXPECT_TRUE(test::all_close(baseline_results.at(0), optimized_results.at(0)));
 }
+
+TEST(core_fusion, zero_padded_reshaped_conv)
+{
+    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2, 1});
+    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
+    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});
+    auto pad =
+        make_shared<op::Pad>(X, pad_value, CoordinateDiff{0, 1, 0, 0}, CoordinateDiff{0, 0, 1, 0});
+    auto reshape = make_shared<op::Reshape>(pad, AxisVector{0, 3, 1, 2}, Shape{1, 1, 3, 3});
+    auto conv = make_shared<op::Convolution>(reshape,
+                                             F,
+                                             Strides{1, 1},
+                                             Strides{1, 1},
+                                             CoordinateDiff{0, 0},
+                                             CoordinateDiff{0, 0},
+                                             Strides{1, 1});
+    auto func = make_shared<Function>(conv, ParameterVector{X, F});
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
+
+    auto backend = runtime::Backend::create("CPU");
+    backend->compile(func);
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 0);
+}
+
+TEST(core_fusion, zero_padded_conv)
+{
+    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
+    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
+    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});
+    auto pad =
+        make_shared<op::Pad>(X, pad_value, CoordinateDiff{0, 0, 0, 1}, CoordinateDiff{0, 0, 1, 0});
+    auto conv = make_shared<op::Convolution>(pad,
+                                             F,
+                                             Strides{1, 1},
+                                             Strides{1, 1},
+                                             CoordinateDiff{0, 0},
+                                             CoordinateDiff{0, 0},
+                                             Strides{1, 1});
+    auto func = make_shared<Function>(conv, ParameterVector{X, F});
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
+
+    auto backend = runtime::Backend::create("CPU");
+    backend->compile(func);
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 0);
+}
+
+TEST(core_fusion, non_zero_padded_conv)
+{
+    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
+    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
+    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{1.0f});
+    auto pad =
+        make_shared<op::Pad>(X, pad_value, CoordinateDiff{0, 0, 0, 1}, CoordinateDiff{0, 0, 1, 0});
+    auto conv = make_shared<op::Convolution>(pad,
+                                             F,
+                                             Strides{1, 1},
+                                             Strides{1, 1},
+                                             CoordinateDiff{0, 0},
+                                             CoordinateDiff{0, 0},
+                                             Strides{1, 1});
+    auto func = make_shared<Function>(conv, ParameterVector{X, F});
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
+
+    auto backend = runtime::Backend::create("CPU");
+    backend->compile(func);
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
+}
+
+TEST(core_fusion, zero_padded_conv_backprop_filters)
+{
+    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
+    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
+    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});
+    auto pad =
+        make_shared<op::Pad>(X, pad_value, CoordinateDiff{0, 0, 0, 1}, CoordinateDiff{0, 0, 1, 0});
+    auto conv = make_shared<op::ConvolutionBackpropFilters>(pad,
+                                                            Shape{1, 1, 2, 2},
+                                                            F,
+                                                            Strides{1, 1},
+                                                            Strides{1, 1},
+                                                            CoordinateDiff{0, 0},
+                                                            CoordinateDiff{0, 0},
+                                                            Strides{1, 1});
+    auto func = make_shared<Function>(conv, ParameterVector{X, F});
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
+
+    auto backend = runtime::Backend::create("CPU");
+    backend->compile(func);
+
+    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 0);
+}
 TEST(core_fusion, conv_bias)
 {
     auto gen_f = [](bool with_fused_op) {
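In the zero_padded_reshaped_conv test above, the Pad is separated from the Convolution by a Reshape that transposes NHWC to NCHW, so the pattern has to track the padded axes through the input order. The expected post-fusion graph, inferred from the test's shapes rather than copied from the pass (the exact form it produces is in the collapsed implementation diff): X is {1, 2, 2, 1}; the pad adds 1 below on axis 1 (H) and 1 above on axis 2 (W); the Reshape's input order {0, 3, 1, 2} sends H to output axis 2 and W to output axis 3, so those amounts land on the fused conv's spatial padding.

auto X = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2, 1});
auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
// The Reshape now transposes the *unpadded* input to NCHW...
auto reshape = make_shared<op::Reshape>(X, AxisVector{0, 3, 1, 2}, Shape{1, 1, 2, 2});
// ...and the pad amounts follow their axes through the transpose.
auto fused = make_shared<op::Convolution>(reshape,
                                          F,
                                          Strides{1, 1},
                                          Strides{1, 1},
                                          CoordinateDiff{1, 0}, // below: H=1, W=0
                                          CoordinateDiff{0, 1}, // above: H=0, W=1
                                          Strides{1, 1});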
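The assertions in these tests hinge on the count_ops_of_type test helper to check that the Pad node was (or, in the non-zero case, was not) eliminated. A sketch of how such a helper can be written, assuming it simply walks the function's op list (the real utility lives in the nGraph test support code and may differ in detail):

// Counts the nodes of a given op type in a Function's op list.
template <typename T>
size_t count_ops_of_type(std::shared_ptr<ngraph::Function> f)
{
    size_t count = 0;
    for (const auto& op : f->get_ops())
    {
        if (std::dynamic_pointer_cast<T>(op)) // type test via the node's dynamic type
        {
            count++;
        }
    }
    return count;
}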
@@ -359,121 +359,6 @@ TEST(cpu_fusion, cpu_fusion_pass_matmul_no_bias)
     ASSERT_EQ(mmb, 1);
 }
-
-TEST(cpu_fusion, zero_padded_reshaped_conv)
-{
-    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2, 1});
-    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
-    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});
-    auto pad =
-        make_shared<op::Pad>(X, pad_value, CoordinateDiff{0, 1, 0, 0}, CoordinateDiff{0, 0, 1, 0});
-    auto reshape = make_shared<op::Reshape>(pad, AxisVector{0, 3, 1, 2}, Shape{1, 1, 3, 3});
-    auto conv = make_shared<op::Convolution>(reshape,
-                                             F,
-                                             Strides{1, 1},
-                                             Strides{1, 1},
-                                             CoordinateDiff{0, 0},
-                                             CoordinateDiff{0, 0},
-                                             Strides{1, 1});
-    auto func = make_shared<Function>(conv, ParameterVector{X, F});
-
-    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
-
-    auto backend = runtime::Backend::create("CPU");
-    backend->compile(func);
-
-    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 0);
-}
-
-TEST(cpu_fusion, zero_padded_conv)
-{
-    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
-    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
-    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});
-    auto pad =
-        make_shared<op::Pad>(X, pad_value, CoordinateDiff{0, 0, 0, 1}, CoordinateDiff{0, 0, 1, 0});
-    auto conv = make_shared<op::Convolution>(pad,
-                                             F,
-                                             Strides{1, 1},
-                                             Strides{1, 1},
-                                             CoordinateDiff{0, 0},
-                                             CoordinateDiff{0, 0},
-                                             Strides{1, 1});
-    auto func = make_shared<Function>(conv, ParameterVector{X, F});
-
-    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
-
-    auto backend = runtime::Backend::create("CPU");
-    backend->compile(func);
-
-    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 0);
-}
-
-TEST(cpu_fusion, non_zero_padded_conv)
-{
-    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
-    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 1});
-    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{1.0f});
-    auto pad =
-        make_shared<op::Pad>(X, pad_value, CoordinateDiff{0, 0, 0, 1}, CoordinateDiff{0, 0, 1, 0});
-    auto conv = make_shared<op::Convolution>(pad,
-                                             F,
-                                             Strides{1, 1},
-                                             Strides{1, 1},
-                                             CoordinateDiff{0, 0},
-                                             CoordinateDiff{0, 0},
-                                             Strides{1, 1});
-    auto func = make_shared<Function>(conv, ParameterVector{X, F});
-
-    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
-
-    auto backend = runtime::Backend::create("CPU");
-    backend->compile(func);
-
-    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
-}
-
-TEST(cpu_fusion, zero_padded_conv_backprop_filters)
-{
-    auto X = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
-    auto F = make_shared<op::Parameter>(element::f32, Shape{1, 1, 2, 2});
-    auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});
-    auto pad =
-        make_shared<op::Pad>(X, pad_value, CoordinateDiff{0, 0, 0, 1}, CoordinateDiff{0, 0, 1, 0});
-    auto conv = make_shared<op::ConvolutionBackpropFilters>(pad,
-                                                            Shape{1, 1, 2, 2},
-                                                            F,
-                                                            Strides{1, 1},
-                                                            Strides{1, 1},
-                                                            CoordinateDiff{0, 0},
-                                                            CoordinateDiff{0, 0},
-                                                            Strides{1, 1});
-    auto func = make_shared<Function>(conv, ParameterVector{X, F});
-
-    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 1);
-
-    auto backend = runtime::Backend::create("CPU");
-    backend->compile(func);
-
-    ASSERT_EQ(count_ops_of_type<op::Pad>(func), 0);
-}
 struct ConvolutionBiasTestData
 {
     size_t n{0};